Dataset schema (column, type, observed range):
  query            string   9 to 3.4k characters
  document         string   9 to 87.4k characters
  metadata         dict
  negatives        list     4 to 101 items
  negative_scores  list     4 to 101 items
  document_score   string   3 to 10 characters
  document_rank    string   102 distinct values
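Each column is illustrated below with example rows. For orientation, here is a minimal sketch of how rows with this schema could be consumed in Python; the file name "triplets.jsonl" and the JSON-lines layout are assumptions for illustration, not something stated by the dataset itself.

import json

# Minimal sketch, not an official loader. Assumes one JSON object per line
# in a hypothetical file "triplets.jsonl" with the field names listed above.
with open("triplets.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        query = row["query"]                  # natural-language description
        positive = row["document"]            # matching code snippet
        negatives = row["negatives"]          # non-matching snippets
        scores = row["negative_scores"]       # one score per negative
        assert len(negatives) == len(scores)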
Clears all the variables from the workspace of the Spyder application.
def clear_all():
    gl = globals().copy()
    for var in gl:
        if var[0] == '_': continue
        if 'func' in str(globals()[var]): continue
        if 'module' in str(globals()[var]): continue
        del globals()[var]
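A side note on the snippet above: it iterates over globals().copy() rather than globals() itself because mutating a dict while iterating it raises a RuntimeError. A minimal, self-contained illustration with a plain dict (hypothetical names, not the real interpreter namespace):

# Deleting from a dict while iterating it directly raises RuntimeError,
# which is why clear_all() iterates over a copy of globals().
d = {"a": 1, "_hidden": 2, "b": 3}
for key in d.copy():      # iterate over a snapshot of the keys
    if key.startswith("_"):
        continue
    del d[key]            # safe: we are not iterating d itself
print(d)                  # {'_hidden': 2}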
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n self.vars = []", "def controlVariablesClear() :\n s.clearScriptAll()", "def ClearWorkspace(workspace_name=''):\n ClearWorkspaceCC(workspace_name)", "def clear(self):\n self.globalDefines = {}\n self.axiom = self.setAxiomFromString(\"\")\n self.clearProductions()\n self.niterations = 1\n self.resultPString = None", "def ClearWorkspace(workspace_name=''):\n _C.ClearWorkspace(workspace_name)", "def clear_all(self):\n self.clear_files_paths()\n self.clear_programs()", "def clear_all():\n gl = globals().copy()\n for var in gl:\n if var[0] == '_': continue\n if 'func' in str(globals()[var]): continue\n if 'module' in str(globals()[var]): continue\n\n del globals()[var]", "def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()", "def clear_locals(self):\n self._locals = dict()", "def clear(self) -> None:\n self._REGISTERED_ENVS.clear()\n self._manifests = []\n self._sync = True", "def _clear_workspace(self):\n\n\t\ttry:\n\t\t\tself.main.workspace.remove(self.main.welcome_note)\n\t\texcept: pass\n\t\ttry:\n\t\t\tself.main.workspace.remove(self.services_view.notebook)\n\t\texcept: pass\n\t\ttry:\n\t\t\tself.main.workspace.remove(self.work.notebook)\n\t\texcept: pass\n\n\t\t\n\t\treturn True", "def _clear_variables( self ):\r\n self.navigation = None\r\n self.resPath = None\r\n self.resolutions = None\r\n self.currentResolution = None\r\n self.resolution = None\r\n for doc in self.include_doc:\r\n try: doc.unlink()\r\n except: pass", "def reset(self):\n for var in self.var_list:\n var.value = None\n var.domain = copy.deepcopy(var.init_domain)", "def ResetWorkspace(workspace_name=''):\n ResetWorkspaceCC(workspace_name)", "def remove_all(self):\n self._options.clear()\n self._programs.clear()", "def ResetWorkspace(workspace_name=''):\n _C.ResetWorkspace(workspace_name)", "def _clear(self):\n self.xi.ravel()[:] = 0\n self.xi_im.ravel()[:] = 0\n self.meanr.ravel()[:] = 0\n self.meanlogr.ravel()[:] = 0\n self.weight.ravel()[:] = 0\n self.npairs.ravel()[:] = 0\n self._varxi = None\n self._cov = None", "def clearAllSettings(self) -> None:\n ...", "def magic_reset(self, parameter_s=''):\n\n ans = raw_input(\n \"Once deleted, variables cannot be recovered. Proceed (y/n)? 
\")\n if not ans.lower() == 'y':\n print 'Nothing done.'\n return\n for i in self.magic_who_ls():\n del(self.locals[i])", "def remove_variables(self):\n self.variables = []", "def reset(self):\n self.current = self.root\n env = self.env\n obs, infos = env.reset()\n obs = obs[obs.find(\"=\"):] # removes textworld legend\n\n return env, obs, infos", "def clear_workspace(client, workspace=None, filenames=None):\n active_workspace = client.windchill_get_workspace()\n data = {\"workspace\": active_workspace}\n if workspace is not None:\n data[\"workspace\"] = workspace\n if filenames is not None:\n data[\"filenames\"] = filenames\n return client._creoson_post(\"windchill\", \"clear_workspace\", data)", "def reset_context():\n global root_dir\n global wells_list\n global tops_list\n\n del root_dir, wells_list, tops_list\n root_dir = WellsDir(None, 'root')\n wells_list = list()\n tops_list = list()", "def _clean_up_experiment(self):\n if self.module_name == \"keras\":\n K.clear_session()", "def _reset(lp):\n if hasattr(lp, \"solverModel\"):\n delattr(lp, \"solverModel\")\n for v in lp.variables():\n if hasattr(v, \"_xprs\"):\n delattr(v, \"_xprs\")\n for c in lp.constraints.values():\n if hasattr(c, \"_xprs\"):\n delattr(c, \"_xprs\")", "def reset(self):\n self._varstate = None\n self.frozen = False", "def reset(self):\r\n self.env.reset()\r\n return self.env.get_obs()", "def clear():\n\n os.system(\"clear\")", "def clear(self):\n if self.debug:\n print(\"DIMS cleared\")\n self.sp_dicts.clear()", "def reset(self):\n self._pkgs.clear()\n self._catalogs.clear()\n self._categories.clear()\n self._command_to_category.clear()\n self._version = None", "def clean():\n if system() == 'Windows':\n os.system('cls')\n else:\n os.system('clear')", "def ClearTools(self):\r\n\r\n self.Clear()", "def reset(self):\n return self.env.reset()", "def clear(self):\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.num_examples = 0", "def reset(self):\r\n if os.path.isfile(os.path.join(self._var_dir, 'SETUP')):\r\n os.remove(os.path.join(self._var_dir, 'SETUP'))", "def cleanWorkspace(self):\n self.window.labelMessage.setText(\"\")\n\n if self.inspectinoAnalyzer:\n del self.analyzerWidget\n self.inspectinoAnalyzer = False\n\n for index in reversed(range(self.window.layoutDepthermInpesction.count())):\n layoutItem = self.window.layoutDepthermInpesction.itemAt(index)\n widgetToRemove = layoutItem.widget()\n print(\"found widget: \" + str(widgetToRemove))\n widgetToRemove.setParent(None)\n self.window.layoutDepthermInpesction.removeWidget(widgetToRemove)", "def clear_all_output_settings(self):\n self.general_information = []\n self.object_information = []\n self.camera_information = []\n self.light_information = []\n self.bounding_box_information = []", "def reset(self):\n if hasattr(self, \"W\"):\n del self.W\n if hasattr(self, \"T\"):\n del self.T\n if hasattr(self, \"P\"):\n del self.P", "def reset():\n _runtime.reset()", "def reset(self):\n self.satisfiability = Satisfiability.UNTESTED\n self.model = None\n self.unsatCore = []", "def reset_env(self):\n return self.env.reset()", "def __clearAllFolds(self):\n aw = self.activeWindow()\n if aw:\n aw.clearFolds()", "def clear(context):\n\n log.log(level=0, msg=\"clear !\")\n log.log(level=0, msg=\"controller set\")\n log.log(level=0, msg=\"mesh set\")\n log.log(level=0, msg=\"joint set\")\n\n # node = pm.PyNode(node)\n #\n # for script in info[const.POSTSCRIPTS]:\n # mod = 
importlib.import_module(script)\n # mod.PrePost.run(context)", "def _clear_context():\n for var in [x for x in __context__ if x.startswith(\"lxc.\")]:\n log.trace(\"Clearing __context__['%s']\", var)\n __context__.pop(var, None)", "def reset(self):\n self.dims.clear()\n self.xlabels.clear()\n self.annotators.clear()\n self._figTitle = None\n self.tbmTitle = None\n self._isSubplot = False\n self._universal_xlabel = False\n self._plotter = None\n self.Nsp = 0", "def reset(self):\n self.settings = None\n self.sublime_settings = None\n self.settings_base = \"Javatar.sublime-settings\"\n self.sublime_base = \"Preferences.sublime-settings\"", "def reset():\n Vessel.reset_instances()", "def clear_programs(self):\n self._programs = \"\"", "def reset_variables(self) -> None:\n self.attributs = {}\n self.data = []", "def cls(self):\n os.system('clear')", "def reset(self):\n\n logger.info('Removed existing OpenMM engine.')\n self._simulation = None", "def reset(self):\n self.in_compact_method = False\n self.in_setup = False\n self.autoname_cursor = dict()", "def clear_precalc(self):\n self.Dmat_obs = None\n self.Dmat_obs_b = None\n self.dSdqb_mat1 = None\n self.Mmat = None\n self.Mmat_mix = None\n self.mll = None", "def clear_cxt_vars(cxt):\n if hasattr(cxt, '_cl'):\n del cxt._cl\n if hasattr(cxt, '_pairs'):\n del cxt._pairs", "def clear_datastore():\n local('lib/remote_api_shell.py tweetlocker -p /_/shell -c '\n '\"from lib.utils import clear_datastore; clear_datastore()\"',\n capture=False)", "def clear():\n sub.call('cls', shell=True)", "def clear():\n\n # windows \n if os.name == \"nt\": \n _ = os.system(\"cls\") \n # mac and linux\n else: \n _ = os.system(\"clear\")", "def reset(self) -> VecEnvObs:\n raise NotImplementedError()", "def env_reset(self):\n state = self.env.reset()\n return self.feature_extractor.extract_features(state)", "def clear() -> None:\n\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear() -> None:\n\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear():\r\n os.system('cls' if os.name == 'nt' else 'clear')", "def _clear(self):\n self._commands = []\n self._activeMacros = []\n self._index = 0\n self._emitSignals()\n self._inUndoRedo = False", "def clear(self):\n self.xi[:] = 0\n self.meanlogr[:] = 0\n self.weight[:] = 0\n self.npairs[:] = 0", "def clear(self):\n self.recorders = set([])\n self.reset()\n\n # Stop any currently running SpiNNaker application\n self.stop()", "def clear(self):\n self._args = None\n self._oparser = None\n self._namespace = None\n self._mutable_ns = None\n # Keep _mutate_hooks\n self._validate_default_values = False\n self.unregister_opts(self._config_opts)\n for group in self._groups.values():\n group._clear()", "def reset(self):\n Simulation.reset(self)", "def clear():\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear():\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear(self):\n self.SetPoint = 0.0\n \n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n \n self.windup_guard = 20.0\n self.output = 0.0", "def clear_lists(self): \n self.fp_config_files = []\n self.txt_files = []\n self.fr_config_files = []", "def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()", "def clean_workspace(self):\n try:\n if self.cleanup_resources.get('srpm_path'):\n os.remove(self.cleanup_resources.get('srpm_path'))\n if self.cleanup_resources.get('platform_pot_path'):\n 
os.remove(self.cleanup_resources.get('platform_pot_path'))\n if self.cleanup_resources.get('src_tar_dir'):\n rmtree(self.cleanup_resources.get('src_tar_dir'), ignore_errors=True)\n if self.cleanup_resources.get('extract_dir'):\n rmtree(self.cleanup_resources.get('extract_dir'), ignore_errors=True)\n if self.cleanup_resources.get('download_dir'):\n rmtree(self.cleanup_resources.get('download_dir'), ignore_errors=True)\n except OSError as e:\n self.app_logger('ERROR', \"Failed to clean sandbox! Due to %s\" % e)", "def __reset_variables(self):\r\n self.__running = True", "def clear(self):\n self.call('clear')", "def resetSim(self):\n self.powers = []", "def clear_data(self):\n\t\tfor attr in self.coeff_vectors.iterkeys():\n\t\t\tdel self.coeff_vectors[attr][:]\n\t\tself.coeff_vectors.clear()\n\t\tself.coeff_vectors = None", "def clear(self):\n self.knownStrings.clear()", "def cleanup(self):\n\n self.PLC['1'].set_plc_mode(0)\n self.PLC['1'].plc_clear('all')\n super(Test200SmartSanityClear005, self).cleanup()", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def reset(self):\n self.solver = None", "def _reset(self, env_id: np.ndarray) -> None:", "def reset(self, env):\n self._env = env\n return", "def reset(self, window):\n self.__close_preview(window)\n self.__clear_context()", "def reset(self):\n\n game.reset()\n sm.get_screen('game_screen').reset()", "def clean_env():\n for key in ['FOO', 'THOR', 'IRON', 'NAME', 'PERSONAL_DIR']:\n os.environ.pop(key, None)", "def clear():", "def clear_data_structure():\n \n packetFilter = \"\"\n # Reset to its default value\n packetFilterOperator = \"AND\"\n \n del actions[:]\n variables.clear()\n symbol_table.clear()", "def clear(self):\r\n self.SetPoint = 0.0\r\n\r\n self.PTerm = 0.0\r\n self.ITerm = 0.0\r\n self.DTerm = 0.0\r\n self.last_error = 0.0\r\n\r\n # Windup Guard\r\n self.int_error = 0.0\r\n self.windup_guard = 20.0\r\n\r\n self.output = 0.0", "def clear_all(self):\n raise NotImplementedError", "def clear():\r\n if name == 'nt':\r\n _ = system('cls')\r\n else:\r\n _ = system('clear')", "def reset(self):\n for provider in self.providers.values():\n provider.reset()\n\n for observation in self.observations.values():\n observation.reset()", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def reset():\n local('cd {{ project_name }} && \\\n rm -rf static && rm -rf gzip && rm -rf build')", "def reset(self):\n self.mol.RHF(doPrint=False)\n self.dipole = []\n self.angmom = []\n self.Energy = []\n self.shape = []", "def reset(self):\n self.history = []\n self.frame = {}\n self.params = {}\n self.form = {}" ]
[ "0.70930827", "0.67948645", "0.6680693", "0.6621833", "0.65619415", "0.6522764", "0.6467503", "0.6458815", "0.63563365", "0.6324468", "0.63060844", "0.6302263", "0.62758696", "0.62447083", "0.6243702", "0.6192215", "0.6141518", "0.6140391", "0.6136033", "0.6091357", "0.6073818", "0.6073532", "0.6068858", "0.6050863", "0.60349905", "0.6031521", "0.6030718", "0.59807545", "0.5971055", "0.59702617", "0.59559053", "0.59378815", "0.5928251", "0.5907807", "0.5903399", "0.589795", "0.58973765", "0.5876241", "0.58748466", "0.5874639", "0.58602166", "0.5837063", "0.58357686", "0.5835376", "0.5833125", "0.5831485", "0.5830691", "0.58183455", "0.5804061", "0.57864624", "0.5775222", "0.5744486", "0.5742283", "0.5734039", "0.57318944", "0.573142", "0.5709844", "0.5697642", "0.5684199", "0.56838405", "0.56838405", "0.56790745", "0.56770056", "0.5664734", "0.5659179", "0.56544703", "0.5645042", "0.5644757", "0.5644757", "0.5635008", "0.5634969", "0.5629633", "0.5626639", "0.56263447", "0.56258124", "0.56165063", "0.56140035", "0.56048363", "0.56032264", "0.5600607", "0.5595104", "0.55907804", "0.55790335", "0.55682546", "0.55674887", "0.556532", "0.55613935", "0.5561374", "0.5560542", "0.5559542", "0.5556506", "0.55510753", "0.55492747", "0.55492747", "0.55492747", "0.55485636", "0.55371284", "0.55357254" ]
0.64478254
9
FresnelReflection takes the smallest angle between the ray direction and the normal. Thus the flipped normal will also work.
def test_antinormal_reflection(self):
    n1 = 1.0
    n2 = 1.5
    normal = (0.0, 0.0, -1.0)
    angle = 0.0
    ray = Ray(position=(0.0, 0.0, 0.0), direction=(0.0, 0.0, 1.0), wavelength=None)
    fresnel = FresnelReflection()
    assert np.isclose(fresnel.reflectivity(angle, n1, n2), 0.04)
    new_ray = fresnel.transform(ray, {"normal": normal})
    assert np.allclose(flip(ray.direction), new_ray.direction)
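For context on the test above: it relies on a flip helper and on the mirror-reflection formula. The sketch below assumes flip simply negates a vector and that reflection is d - 2(d·n)n; because the sign of n cancels in that formula, the flipped normal produces the same reflected ray, which is what the query text states. These helpers are assumptions for illustration, not the library's actual implementation.

import numpy as np

def flip(v):
    # Assumed behaviour: return the antiparallel vector.
    return -np.asarray(v, dtype=float)

def reflect(direction, normal):
    # Mirror reflection d - 2 (d . n) n; the sign of n cancels, so the
    # flipped normal yields the same reflected ray.
    d = np.asarray(direction, dtype=float)
    n = np.asarray(normal, dtype=float)
    return d - 2.0 * np.dot(d, n) * n

# At normal incidence the reflected ray is the incident ray reversed,
# whichever way the surface normal points.
assert np.allclose(reflect((0.0, 0.0, 1.0), (0.0, 0.0, -1.0)), (0.0, 0.0, -1.0))
assert np.allclose(reflect((0.0, 0.0, 1.0), (0.0, 0.0, 1.0)), (0.0, 0.0, -1.0))
assert np.allclose(flip((0.0, 0.0, 1.0)), (0.0, 0.0, -1.0))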
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reflect(self, ray):\n normal = self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n return Ray(\n ray.direction - 2 * dot(ray.direction, normal) * normal, ray.position)", "def refract(self, ray, rho):\n normal = self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n incidence = dot(-ray.direction, normal)\n complement = sqrt(1.0 - (1.0 - incidence**2) / rho**2)\n return Ray((ray.direction / rho +\n (incidence / rho - complement) * normal), ray.position)", "def test_reflection_vector(self):\n\n # A ray approaching at 45 degrees\n v = vectors.Vector(1, -1, 0)\n n = vectors.Vector(0, 1, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 1, 0))\n\n # Ray along an axis hits a surface at an angle\n v = vectors.Vector(0, -1, 0)\n n = vectors.Vector(math.sqrt(2)/2, math.sqrt(2)/2, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 0, 0))", "def reflect_step(r0, step, intersection, normal_vector, step_length):\n \n # Calculate distance to intersection point and update step length\n step_length -= math.sqrt((r0[0] - intersection[0])**2 + (r0[1] - intersection[1])**2 + (r0[2] - intersection[2])**2)\n \n # Calculate reflection off the surface\n reflected_x = -r0[0] + 2*intersection[0] + 2*normal_vector[0]*((r0[0] - intersection[0])*normal_vector[0] + (r0[1] - intersection[1])*normal_vector[1] + (r0[2] - intersection[2])*normal_vector[2])\n reflected_y = -r0[1] + 2*intersection[1] + 2*normal_vector[1]*((r0[0] - intersection[0])*normal_vector[0] + (r0[1] - intersection[1])*normal_vector[1] + (r0[2] - intersection[2])*normal_vector[2])\n reflected_z = -r0[2] + 2*intersection[2] + 2*normal_vector[2]*((r0[0] - intersection[0])*normal_vector[0] + (r0[1] - intersection[1])*normal_vector[1] + (r0[2] - intersection[2])*normal_vector[2])\n \n # Update step direction and spin position\n step[0] = reflected_x - intersection[0]\n step[1] = reflected_y - intersection[1]\n step[2] = reflected_z - intersection[2]\n normalizing_factor = math.sqrt(step[0]**2+step[1]**2+step[2]**2)\n step[0] /= normalizing_factor \n step[1] /= normalizing_factor \n step[2] /= normalizing_factor \n \n epsilon = 1e-6\n \n r0[0] = intersection[0] + epsilon*step_length*step[0]\n r0[1] = intersection[1] + epsilon*step_length*step[1]\n r0[2] = intersection[2] + epsilon*step_length*step[2]\n \n return", "def get_normal_fluctuation(hover,target,normal,vec):\n\tvector = hover - target\n\tvector = vector - vec*(vector>(vec/2.)) + vec*(vector<(-1*vec/2.))\n\tprojected = planeproject(vector,normal)\n\t#---get the sign of the projection\n\tplane_point = vector+projected\n\tsign = 1.0-2.0*(np.arccos(np.dot(vecnorm(normal),vecnorm(vector)))>np.pi/2.)\n\treturn sign*np.linalg.norm(plane_point)", "def reflected(self, normal):\n return self - (2 * normal * self) * normal", "def op_fresnel_reflection(m, theta):\n rho_p = pypolar.fresnel.r_par_amplitude(m, theta)\n rho_s = pypolar.fresnel.r_per_amplitude(m, theta)\n a = abs(rho_s)**2 + abs(rho_p)**2\n b = abs(rho_s)**2 - abs(rho_p)**2\n c = 2 * rho_s * rho_p\n mat = np.array([[a, b, 0, 0],\n [b, a, 0, 0],\n [0, 0, c, 0],\n [0, 0, 0, c]])\n return 0.5 * mat", "def get_focal_point(patches, shell_point, num_rays=20):\n focal_point = Point3D(0.0, 0.0, 0.0)\n for patch in patches:\n #create a bunch of parallel rays coming from the eye\n ray_vector = normalize(shell_point)\n \n ##TODO: remove me\n #ray_vector = normalize(patch.shell_point)\n \n ray_rotation = numpy.zeros((3, 3))\n optics.rotation_matrix.R_2vect(ray_rotation, 
PRINCIPAL_RAY, ray_vector)\n rays = []\n for x in numpy.linspace(-LIGHT_RADIUS, LIGHT_RADIUS, num_rays*2+1):\n for y in numpy.linspace(-LIGHT_RADIUS, LIGHT_RADIUS, num_rays*2+1):\n start_point = ray_rotation.dot(Point3D(x, y, 0.0))\n rays.append(Ray(start_point, start_point + ray_vector))\n \n #find the point such that the spot size is minimized on the screen.\n #can average the normal of the reflected rays to get approximately where the screen goes\n #then iteratively try different distances until we've minimized the spot size there\n focal_point = Point3D(0.0, 0.0, 0.0)\n reflected_rays = [ray for ray in patch.reflect_rays_no_bounds(rays) if ray != None]\n approximate_screen_normal = sum([normalize(ray.start - ray.end) for ray in reflected_rays]) / len(reflected_rays)\n if optics.debug.PATCH_FOCAL_REFLECTIONS:\n #TODO: all rays don't come from the origin. draw all rays from their actual start points, and draw non-reflected rays going past the surface\n #also, only draw the part of the surface that is real and should be reflected from\n axes = matplotlib.pyplot.subplot(111, projection='3d')\n size = 5\n num_points = 10\n x, y = numpy.meshgrid(numpy.linspace(-size, size, num_points), numpy.linspace(-size, size, num_points))\n axes.scatter(x, y, patch.poly.get_z_for_plot(x, y), c='r', marker='o').set_label('patch')\n for ray in reflected_rays:\n debug_dist = 2*numpy.linalg.norm(ORIGIN - ray.start)\n rays_to_draw = numpy.array([\n patch.poly_space.point_to_space(ORIGIN),\n patch.poly_space.point_to_space(ray.start),\n patch.poly_space.point_to_space(debug_dist * normalize(ray.end-ray.start) + ray.start)\n ])\n axes.plot(rays_to_draw[:, 0], rays_to_draw[:, 1], rays_to_draw[:, 2], label=\"ray\")\n axes.set_xlabel('X')\n axes.set_ylabel('Y')\n axes.set_zlabel('Z')\n matplotlib.pyplot.legend()\n matplotlib.pyplot.show()\n def calculate_spot_size(distance):\n \"\"\"\n :returns: average distance from the central point for the plane at this distance\n \"\"\"\n screen_plane = Plane(distance * approximate_screen_normal * -1.0 + shell_point, approximate_screen_normal)\n points = []\n for ray in reflected_rays:\n points.append(screen_plane.intersect_line(ray.start, ray.end))\n average_point = sum(points) / len(points)\n errors = [numpy.linalg.norm(p - average_point) for p in points]\n if optics.debug.PATCH_FOCAL_SPOT_SIZE:\n #use coordinate space to move everything to the xy plane\n space = CoordinateSpace(screen_plane._point, screen_plane._normal)\n transformed_points = numpy.array([space.point_to_space(p) for p in points])\n matplotlib.pyplot.plot(transformed_points[:, 0], transformed_points[:, 1], \"r\", linestyle='None', marker='o', label=\"rays at %s\" % (distance))\n matplotlib.pyplot.legend()\n matplotlib.pyplot.show()\n #keep a fixed scale to x and y so that each graph can be compared with the previous\n #should probably print the errors as well\n print errors\n print sum(errors) / len(errors)\n return sum(errors) / len(errors)\n previous_distance = numpy.linalg.norm(patch.shell_point - patch.screen_point)\n min_dist = previous_distance * 0.9\n max_dist = previous_distance * 1.1\n num_iterations = 20\n tolerance = 0.0001\n best_dist = scipy.optimize.fminbound(calculate_spot_size, min_dist, max_dist, maxfun=num_iterations, xtol=tolerance, full_output=False, disp=0)\n focal_point += best_dist * approximate_screen_normal * -1.0 + shell_point\n return focal_point / len(patches)", "def _field_Fresnel(z, field, dx, lam, dtype, usepyFFTW):\n \n \"\"\" 
*************************************************************\n Major differences to Cpp based LP version:\n - dx =siz/N instead of dx=siz/(N-1), more consistent with physics \n and rest of LP package\n - fftw DLL uses no normalization, numpy uses 1/N on ifft -> omitted\n factor of 1/(2*N)**2 in final calc before return\n - bug in Cpp version: did not touch top row/col, now we extract one\n more row/col to fill entire field. No errors noticed with the new\n method so far\n ************************************************************* \"\"\"\n _using_pyfftw = False # determined if loading is successful \n if usepyFFTW or _USE_PYFFTW:\n try:\n import pyfftw as _pyfftw\n from pyfftw.interfaces.numpy_fft import fft2 as _fft2\n from pyfftw.interfaces.numpy_fft import ifft2 as _ifft2\n _fftargs = {'planner_effort': 'FFTW_ESTIMATE',\n 'overwrite_input': True,\n 'threads': -1} #<0 means use multiprocessing.cpu_count()\n _using_pyfftw = True \n except ImportError:\n #import warnings\n #warnings.warn(_WARNING)\n _WARNING = '\\n**************************** WARNING ***********************\\n'\\\n +'In the Fresnel command you required FFT with the pyFFTW package.\\n'\\\n +'or _USE_PYFFTW = True in your config.py file.\\n'\\\n +'However LightPipes cannot import pyFFTW because it is not installed.\\n'\\\n +'Falling back to numpy.fft.\\n'\\\n +'(Try to) install pyFFTW on your computer for faster performance.\\n'\\\n +'Enter at a terminal prompt: python -m pip install pyfftw.\\n'\\\n +'Or reinstall LightPipes with the option pyfftw\\n'\\\n +'Enter: python -m pip install lightpipes[pyfftw]\\n\\n'\\\n +'*************************************************************'\n print(_WARNING)\n if not _using_pyfftw:\n from numpy.fft import fft2 as _fft2\n from numpy.fft import ifft2 as _ifft2\n _fftargs = {}\n tictoc.tic()\n N = field.shape[0] #assert square\n \n legacy = True #switch on to numerically compare oldLP/new results\n if legacy:\n kz = 2.*3.141592654/lam * z\n siz = N*dx\n dx = siz/(N-1) #like old Cpp code, even though unlogical\n else:\n kz = 2*_np.pi/lam*z\n \n \n cokz = _np.cos(kz)\n sikz = _np.sin(kz)\n \n No2 = int(N/2) #\"N over 2\"\n \"\"\"The following section contains a lot of uses which boil down to\n 2*No2. For even N, this is N. For odd N, this is NOT redundant:\n 2*No2 is N-1 for odd N, therefore sampling an even subset of the\n field instead of the whole field. Necessary for symmetry of first\n step involving Fresnel integral calc.\n \"\"\"\n if _using_pyfftw:\n in_outF = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n in_outK = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n else:\n in_outF = _np.zeros((2*N, 2*N),dtype=dtype)\n in_outK = _np.zeros((2*N, 2*N),dtype=dtype)\n \n \"\"\"Our grid is zero-centered, i.e. the 0 coordiante (beam axis) is\n not at field[0,0], but field[No2, No2]. 
The FFT however is implemented\n such that the frequency 0 will be the first element of the output array,\n and it also expects the input to have the 0 in the corner.\n For the correct handling, an fftshift is necessary before *and* after\n the FFT/IFFT:\n X = fftshift(fft(ifftshift(x))) # correct magnitude and phase\n x = fftshift(ifft(ifftshift(X))) # correct magnitude and phase\n X = fftshift(fft(x)) # correct magnitude but wrong phase !\n x = fftshift(ifft(X)) # correct magnitude but wrong phase !\n A numerically faster way to achieve the same result is by multiplying\n with an alternating phase factor as done below.\n Speed for N=2000 was ~0.4s for a double fftshift and ~0.1s for a double\n phase multiplication -> use the phase factor approach (iiij).\n \"\"\"\n # Create the sign-flip pattern for largest use case and \n # reference smaller grids with a view to the same data for\n # memory saving.\n ii2N = _np.ones((2*N),dtype=float)\n ii2N[1::2] = -1 #alternating pattern +,-,+,-,+,-,...\n iiij2N = _np.outer(ii2N, ii2N)\n iiij2No2 = iiij2N[:2*No2,:2*No2] #slice to size used below\n iiijN = iiij2N[:N, :N]\n\n RR = _np.sqrt(1/(2*lam*z))*dx*2\n io = _np.arange(0, (2*No2)+1) #add one extra to stride fresnel integrals\n R1 = RR*(io - No2)\n fs, fc = _fresnel(R1)\n fss = _np.outer(fs, fs) # out[i, j] = a[i] * b[j]\n fsc = _np.outer(fs, fc)\n fcs = _np.outer(fc, fs)\n fcc = _np.outer(fc, fc)\n \n \"\"\"Old notation (0.26-0.33s):\n temp_re = (a + b + c - d + ...)\n # numpy func add takes 2 operands A, B only\n # -> each operation needs to create a new temporary array, i.e.\n # ((((a+b)+c)+d)+...)\n # since python does not optimize to += here (at least is seems)\n New notation (0.14-0.16s):\n temp_re = (a + b) #operation with 2 operands\n temp_re += c\n temp_re -= d\n ...\n Wrong notation:\n temp_re = a #copy reference to array a\n temp_re += b\n ...\n # changing `a` in-place, re-using `a` will give corrupted\n # result\n \"\"\"\n temp_re = (fsc[1:, 1:] #s[i+1]c[j+1]\n + fcs[1:, 1:]) #c[+1]s[+1]\n temp_re -= fsc[:-1, 1:] #-scp [p=+1, without letter =+0]\n temp_re -= fcs[:-1, 1:] #-csp\n temp_re -= fsc[1:, :-1] #-spc\n temp_re -= fcs[1:, :-1] #-cps\n temp_re += fsc[:-1, :-1] #sc\n temp_re += fcs[:-1, :-1] #cs\n \n temp_im = (-fcc[1:, 1:] #-cpcp\n + fss[1:, 1:]) # +spsp\n temp_im += fcc[:-1, 1:] # +ccp\n temp_im -= fss[:-1, 1:] # -ssp\n temp_im += fcc[1:, :-1] # +cpc\n temp_im -= fss[1:, :-1] # -sps\n temp_im -= fcc[:-1, :-1] # -cc\n temp_im += fss[:-1, :-1]# +ss\n \n temp_K = 1j * temp_im # a * b creates copy and casts to complex\n temp_K += temp_re\n temp_K *= iiij2No2\n temp_K *= 0.5\n in_outK[(N-No2):(N+No2), (N-No2):(N+No2)] = temp_K\n \n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] \\\n = field[(N-2*No2):N,(N-2*No2):N] #cutting off field if N odd (!)\n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] *= iiij2No2\n \n tictoc.tic()\n in_outK = _fft2(in_outK, **_fftargs)\n in_outF = _fft2(in_outF, **_fftargs)\n t_fft1 = tictoc.toc()\n \n in_outF *= in_outK\n \n in_outF *= iiij2N\n tictoc.tic()\n in_outF = _ifft2(in_outF, **_fftargs)\n t_fft2 = tictoc.toc()\n #TODO check normalization if USE_PYFFTW\n \n Ftemp = (in_outF[No2:N+No2, No2:N+No2]\n - in_outF[No2-1:N+No2-1, No2:N+No2])\n Ftemp += in_outF[No2-1:N+No2-1, No2-1:N+No2-1]\n Ftemp -= in_outF[No2:N+No2, No2-1:N+No2-1]\n comp = complex(cokz, sikz)\n Ftemp *= 0.25 * comp\n Ftemp *= iiijN\n field = Ftemp #reassign without data copy\n ttotal = tictoc.toc()\n t_fft = t_fft1 + t_fft2\n t_outside = ttotal - t_fft\n debug_time = False\n if debug_time:\n 
print('Time total = fft + rest: {:.2f}={:.2f}+{:.2f}'.format(\n ttotal, t_fft, t_outside))\n return field", "def propagate(self, ray, index_0, index_1):\n if self._reflective:\n return self.reflect(ray)\n else:\n return self.refract(ray, index_1/index_0)", "def far_clipping_face(self):\n pln = self.tripod.plane\n l, r, b, t, n, f = self.body.dim\n if self.body.fshape == 'p':\n d = f - n\n # far face dimensions\n l, r, b, t = [(i * d) / n + i for i in (l, r, b, t)]\n face = gt.Plin((l, b, -f), (r, b, -f), (r, t, -f), (l, t, -f))\n return pln.TM * face", "def Fresnel(n1,n2,theta_i):\n\n if np.all(theta_i > 2*np.pi) or isinstance(n1,int) or isinstance(n2,int):\n sys.exit(\"Input the incident angle in radians and the refractive indices as floating points.\")\n # reflection angle is equal to incident angle\n theta_r = theta_i\n # Snell's law for the angle of refraction (transmission)\n theta_t = np.arcsin((n1/float(n2))*np.sin(theta_i))\n # P-Polarized\n Rs = (n1*np.cos(theta_i)-n2*np.cos(theta_t))/(n1*np.cos(theta_i)+n2*np.cos(theta_t))\n Ts = (2*n1*np.cos(theta_i))/(n1*np.cos(theta_i)+n2*np.cos(theta_t))\n # S-Polarized\n Rp = (n2*np.cos(theta_i)-n1*np.cos(theta_t))/(n2*np.cos(theta_i)+n1*np.cos(theta_t))\n Tp = (2*n1*np.cos(theta_i))/(n2*np.cos(theta_i)+n1*np.cos(theta_t))\n # Brewster's Angle\n theta_b = np.arctan(n2/n1)\n # Total Internal Reflection\n theta_tir = np.arcsin(n2/n1)\n return theta_r,theta_t,theta_b,theta_tir,Rp,Tp,Rs,Ts", "def _compute_pixel_ray_direction(\n u: float, v: float, fx: float, fy: float, img_w: int, img_h: int\n) -> NDArrayFloat:\n if not np.isclose(fx, fy, atol=1e-3):\n raise ValueError(\n f\"Focal lengths in the x and y directions must match: {fx} != {fy}\"\n )\n\n # approximation for principal point\n px = img_w / 2\n py = img_h / 2\n\n # the camera coordinate frame (where Z is out, x is right, y is down).\n\n # compute offset from the center\n x_center_offs = u - px\n y_center_offs = v - py\n\n ray_dir: NDArrayFloat = np.array([x_center_offs, y_center_offs, fx])\n ray_dir /= np.linalg.norm(ray_dir)\n return ray_dir", "def frusrum_ray(self, param_x, param_y):\n l, r, b, t, n, f = self.body.dim\n # convert normalized into near frustum space\n sm = ScaleMat(x=r - l, y=t - b)\n # .5 to compensate origin difference between OpenGL space and pane space\n offset = MoveMat(-.5, -.5, -n)\n frustum_point = sm * offset * Pnt(x=param_x, y=param_y, z=0)\n ray = gt.Ray([0, 0, 0], frustum_point.xyz)\n return self.tripod.plane.TM * ray", "def normal(self, point):\n point = self._center - np.array(point)\n # if abs(point.dot(point) - self._radius**2) > 1e-15:\n # raise RayTraceError(\n # 'Cannot compute normal. Point is too far from surface ({}).'.format(\n # (abs(point.dot(point) - self._radius**2))))\n return normalize(point / self._radius)", "def create_incident_reflected(self):\n # 1. add the normal\n # MObject already constructed\n\n # 2. show an incident ray\n #\n # |<-dx->|\n #\n # + |\n # \\ |\n # \\ |\n # \\ |\n # \\ i| i: incident angle\n # \\ |\n # \\|\n # -------*-------\n #\n delta_x = self.normal_length * math.tan(self.incident_angle)\n self.arrow_incident = Arrow(ORIGIN + -delta_x * RIGHT + self.normal_length * UP,\n ORIGIN,\n color=self.arrow_incident_color,\n stroke_width = 4, buff=0).shift(self.mirror_origin)\n\n\n # 3. 
show the incident angle\n self.arc_incident = Arc(\n start_angle = PI/2,\n angle = self.incident_angle,\n radius = self.arc_incident_radius,\n color = self.arc_incident_color,\n arc_center = self.mirror_origin\n )\n\n self.text_incident_fig = TextMobject(r\"Incident ray\").set_color(self.tex_theta_in_color).\\\n scale(1.2).move_to(-5.0 * RIGHT + -1.0 * UP)\n theta_in_pos_offset = -0.5 * RIGHT + 1.9 * UP\n self.tex_theta_in = TexMobject(r\"\\theta_{i}\", color=self.arc_incident_color).move_to(self.mirror_origin + theta_in_pos_offset)\n\n\n # 4. show an reflected ray\n #\n # |<-dx->|\n #\n # + | +\n # \\ | /\n # \\ | /\n # \\ | /\n # \\ i|r / i: incident angle\n # \\ | / r: reflected angle\n # \\|/\n # -------*-------\n #\n delta_x = self.normal_length * math.tan(self.reflected_angle)\n self.arrow_reflected = Arrow(ORIGIN,\n ORIGIN + delta_x * RIGHT + self.normal_length * UP,\n color=self.arrow_reflected_color,\n stroke_width = 4, buff=0).shift(self.mirror_origin)\n\n # 5. show the reflected angle\n self.arc_reflected = Arc(\n start_angle = PI/2 - self.reflected_angle,\n angle = self.reflected_angle,\n radius = self.arc_reflected_radius,\n color = self.arc_reflected_color,\n arc_center = self.mirror_origin\n )\n self.text_reflected_fig = TextMobject(r\"Reflected ray\").set_color(self.tex_theta_ref_color).\\\n scale(1.2).move_to(1.0 * RIGHT + -1.0 * UP)\n\n theta_out_pos_offset = 0.5 * RIGHT + 1.9 * UP\n self.tex_theta_ref = TexMobject(r\"\\theta_{r}\", color=self.arc_reflected_color).move_to(self.mirror_origin + theta_out_pos_offset)\n\n self.tex_mirror_reflect = TexMobject(r\"\\text{Specular reflection: }\",\n r\"\\theta_{i}\",\n r\"=\",\n r\"\\theta_{r}\")\n self.tex_mirror_reflect.scale(self.mirror_reflect_tex_scale).move_to(-1.0 * RIGHT + 3.0 * UP)\n self.tex_mirror_reflect[1].set_color(self.arc_incident_color)\n self.tex_mirror_reflect[3].set_color(self.arc_reflected_color)", "def ForsterOrientationFactor(d1, d2, r):\n rn = r / norm(r) ##Normalized distance vector\n d1n = d1/ norm(d1)\n d2n = d2/ norm(d2)\n Factor = 3 * dot(d1n, rn) * dot(d2n, rn) - dot(d1n, d2n)\n return Factor", "def near_clipping_face(self):\n pln = self.tripod.plane\n l, r, b, t, n, f = self.body.dim\n face = gt.Plin((l, b, -n), (r, b, -n), (r, t, -n), (l, t, -n))\n return pln.TM * face", "def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm", "def get_normal_dist(line, point):\n \n # Rotate: \n x_rot = np.cos(line[1])*point[0] + np.sin(line[1])*point[1]\n \n # Normal distance: x_rot - rho:\n return x_rot - line[0]", "def get_funnel(self):\n v_density = torch.distributions.Normal(0,3)\n potential1 = -v_density.log_prob(self.parameters[0])\n x_density = torch.distributions.Normal(0,torch.exp(self.parameters[0])**0.5)\n potential2 = -x_density.log_prob(self.parameters[1:]).sum()\n return potential1 + potential2", "def random_lookat_ray(goal, radius, variance, fov):\n theta1 = 2.*np.pi*np.random.uniform(-fov, fov)\n theta2 = np.arccos(1 - np.random.uniform(0, fov)**2)\n r = radius + variance*np.random.uniform(0,1.)\n x = r*np.cos(theta1)*np.sin(theta2)\n y = r*np.sin(theta1)*np.sin(theta2)\n z = r*np.cos(theta2)\n R = goal[:3,:3]\n point = goal[:3,3] + np.dot(R, np.array([x,y,z]))\n # Find the direction\n direction = -np.dot(R, np.array([x,y,z]))\n direction = tr.unit_vector(direction)\n return orpy.Ray(point, direction)", "def flipNormals(self):\n self.flip = not self.flip", 
"def test_forward(self) -> None:\n func = self._get_simple_implicit_function()\n\n n_grids, n_points = 10, 9\n raybundle = ImplicitronRayBundle(\n origins=torch.randn(n_grids, 2, 3, 3),\n directions=torch.randn(n_grids, 2, 3, 3),\n lengths=torch.randn(n_grids, 2, 3, n_points),\n xys=0,\n )\n func(raybundle)", "def draw_ray(env, ray, dist=0.03, linewidth=2, color=None):\n if dist < 0:\n newpos = ray.pos() + dist*ray.dir()\n newray = orpy.Ray(newpos, ray.dir())\n else:\n newray = ray\n iktype = orpy.IkParameterizationType.TranslationDirection5D\n ikparam = orpy.IkParameterization(ray, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, dist=dist, linewidth=linewidth,\n coloradd=color)\n return h", "def rayIntersection(self, ray):\n #t = \"what we are trying to find\"\n l = -ray.mDirection\n l0 = ray.mOrigin\n n = self.mNormal\n p0 = self.mDistance * n\n #p = l0 + l * t\n\n if l.dot(n) > 0:\n v = p0 - l0\n t = -(v.dot(n) / l.dot(n))\n return t\n\n else:\n return None", "def trace(self, ray): # type: (Ray) -> Vector\n hit_object = None\n t = numpy.inf\n\n for scene_object in self.scene.shapes:\n t0 = scene_object.intersect(ray)\n if t0 < t:\n t = t0\n hit_object = scene_object\n\n # if there were no intersections, then return the background colour\n if t == numpy.inf:\n return self.scene.camera.background\n\n hit_point = ray.origin + ray.direction * t\n normal = hit_object.normal(hit_point)\n luminance = 0.0\n\n # perform shading calculations\n for light in self.scene.lights:\n hit_point_to_light = (light.centre - hit_point).normal\n\n #check whether this light contributes to the shading\n in_shadow = False\n for shadower in self.scene.shapes:\n # we don't want to test against itself\n if shadower == hit_object:\n continue\n shadow_ray = Ray(hit_point + normal * 0.0001, hit_point_to_light)\n if shadower.intersect(shadow_ray) < numpy.inf:\n in_shadow = True\n break\n if in_shadow:\n continue\n\n # super simple lambertian lighting model\n luminance += hit_point_to_light.dot(normal) * light.power\n\n # calculate shaded colour - luminance may be over one if there are multiple light sources\n # normally this would be dealt with by HDR and tone mapping but is just clipped\n # in demo ray tracers\n object_colour = hit_object.material.colour * min(luminance, 1.0)\n\n # calculate reflection colour if material has reflectance\n if hit_object.material.reflectance != 0.0 and ray.depth != self.scene.camera.depth:\n reflected_direction = (ray.direction - normal * 2 * (ray.direction.dot(normal))).normal\n # we need to 'translate' the reflection vector away from the hitpoint otherwise\n # we risk intersecting the original hit point again which causes artifacts in the reflection\n reflected_ray = Ray(hit_point + reflected_direction * 0.0001, reflected_direction, ray.depth + 1)\n reflection_colour = self.trace(reflected_ray)\n\n # interpolate shaded colour and reflected colour based on reflectance\n return Vector(*[lerp(object_colour.data[i], reflection_colour.data[i], hit_object.material.reflectance) for i in range(3)])\n\n return object_colour", "def heuristic_2_reflection(game, player) -> float:\n\n reflection_available_factor = get_reflection_available_factor(game, player)\n\n return float(reflection_available_factor)", "def perspectiveFovLH(field_of_view, aspect, znear, zfar):\n h = 1 / tan(field_of_view / 2)\n w = h / aspect\n m = [\n [w, 0, 0, 0],\n [0, h, 0, 0],\n [0, 0, zfar / (zfar - znear), 1],\n [0, 0, (znear * zfar) / (znear - zfar), 0],\n ]\n return Matrix(m)", "def _rf(self, p):\n return 
self.faces[:, 0, :] - p # 0 is arbitrary - the other vertices also work", "def ray(self):\n return self._ray", "def rayleigh_norm(th):\n c = np.cos(th)\n c2 = c**2\n a = (c2-1)/(c2+1)\n b = 2*c/(c2+1)\n return np.array([[1,a,0,0],[a,1,0,0],[0,0,b,0],[0,0,0,b]])", "def LinePlaneCollision(planeNormal, planePoint, rayDirection, rayPoint, epsilon=1e-12):\n\n ndotu = planeNormal.dot(rayDirection)\n if abs(ndotu) < epsilon:\n raise RuntimeError(\"no intersection or line is within plane\")\n\n w = rayPoint - planePoint\n si = -planeNormal.dot(w) / ndotu\n Psi = w + si * rayDirection + planePoint\n return Psi", "def cutDownAngle_def(state, raySortie, rayInter):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = max(min(raySortie, diff.norm - rayInter), 20.)\n position += diff\n return goTo(state,position)", "def _trace_ray(self, ray, depth=0, max_depth=5):\n\n color = Color()\n\n if depth >= max_depth:\n return color\n\n intersection = self._get_intersection(ray)\n if intersection is None:\n return color\n\n obj, dist = intersection\n intersection_pt = ray.point_at_dist(dist)\n surface_norm = obj.surface_norm(intersection_pt)\n\n # ambient light\n # color += obj.material.color * obj.material.ambient\n\n point_on_plane = ray.origin + dist*ray.direction\n imgx = point_on_plane.x\n imgy = np.sqrt(point_on_plane.y*point_on_plane.y + point_on_plane.z*point_on_plane.z)\n\n\n '''\n # Nearest Texel\n int_imgx = int(round(imgx))\n int_imgy = int(round(imgy))\n if int_imgx == 512:\n int_imgx = 511\n if int_imgy == 512:\n int_imgy = 511\n color += Color(img[int_imgx, int_imgy, 0], img[int_imgx, int_imgy, 1], img[int_imgx, int_imgy, 2])\n '''\n\n\n # Bilinearly Interpolated Texel\n ceilx = int(math.ceil(imgx))\n ceily = int(math.ceil(imgy))\n floorx = int(math.floor(imgx))\n floory = int(math.floor(imgy))\n if ceilx >= 512:\n ceilx = 511\n if ceily >= 512:\n ceily = 511\n if floorx >= 512:\n floorx = 511\n if floory >= 512:\n floory = 511\n interpolate_x1 = (ceilx - imgx) * (img[ceilx, ceily]) + (imgx - floorx) * (img[floorx, ceily])\n interpolate_x2 = (ceilx - imgx) * (img[ceilx, floory]) + (imgx - floorx) * (img[floorx, floory])\n interpolate_y = (ceily - imgy) * interpolate_x1 + (imgy - floory) * interpolate_x2\n color += Color(interpolate_y[0], interpolate_y[1], interpolate_y[2])\n # print color\n\n\n '''\n # lambert shading\n for light in self.lights:\n pt_to_light_vec = (light - intersection_pt).normalize()\n pt_to_light_ray = Ray(intersection_pt, pt_to_light_vec)\n if self._get_intersection(pt_to_light_ray) is None:\n lambert_intensity = surface_norm * pt_to_light_vec\n if lambert_intensity > 0:\n color += obj.material.color * obj.material.lambert * \\\n lambert_intensity\n\n \n # specular (reflective) light\n reflected_ray = Ray(\n intersection_pt, ray.direction.reflect(surface_norm).normalize())\n color += self._trace_ray(reflected_ray, depth + 1) * \\\n obj.material.specular\n '''\n return color", "def angle_with_membrane_normal(self) -> float:\n memb_normal = np.array([0, 0, 1])\n return np.degrees(np.arccos(np.clip(np.dot(self.dir_vec, memb_normal),\n -1.0, 1.0)))", "def angle_normal(self):\n return atan2(-self.v.x, self.v.y)", "def test_compute_pixel_rays() -> None:\n u = 12\n v = 2\n img_w = 20\n img_h = 10\n fx = 10\n fy = 10\n\n ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n\n gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n assert np.allclose(gt_ray_dir, ray_dir)", "def face_normals(xyz, 
triangles):\n\n\tabc_xyz = face_attr(xyz, triangles)\n\n\tbc_xyz = abc_xyz[:,:,1:3] - abc_xyz[:,:,0:1]\n\tfn = tf.linalg.cross(bc_xyz[:,:,0], bc_xyz[:,:,1])\n\tfn = tf.math.l2_normalize(fn, -1)\n\treturn fn", "def getFov(self):\n return self.light.node().getLens().getFov()", "def CfsVectorsFromAzimuth(fault_azimuth, fault_dip):\n # This is the angle trhough which we rotate n_vec_normal_ref.\n rotation_angle = math.radians(fault_dip - 90)\n fault_azimuth = math.radians(fault_azimuth)\n r_temp_azimuth = np.array([[math.cos(fault_azimuth),\n math.sin(fault_azimuth), 0],\n [-math.sin(fault_azimuth),\n math.cos(fault_azimuth), 0],\n [0, 0, 1]])\n r_temp_dip = np.array([[math.cos(rotation_angle),\n math.sin(rotation_angle), 0],\n [-math.sin(rotation_angle),\n math.cos(rotation_angle), 0],\n [0, 0, 1]])\n n_vec_in_plane = np.dot(r_temp_azimuth, [0, 1, 0])\n n_vec_in_plane = np.dot(r_temp_dip, n_vec_in_plane)\n n_vec_normal = np.dot(r_temp_azimuth, [1, 0, 0])\n n_vec_normal = np.dot(r_temp_dip, n_vec_normal)\n return (n_vec_in_plane, n_vec_normal)", "def fresnel(self, substrate=None, surface=None):\n # Doesn't use ProbeCache, but this routine is not time critical\n Srho, Sirho = (0, 0) if substrate is None else substrate.sld(self)[:2]\n Vrho, Virho = (0, 0) if surface is None else surface.sld(self)[:2]\n if self.back_reflectivity:\n Srho, Vrho = Vrho, Srho\n Sirho, Virho = Virho, Sirho\n if Srho == Vrho:\n Srho = Vrho + 1\n #I = np.ones_like(self.Q)\n I = 1\n calculator = fresnel.Fresnel(rho=Srho*I, irho=Sirho*I,\n Vrho=Vrho*I, Virho=Virho*I)\n return calculator", "def reflect_ghost(self, p0):\n # Instead of self.p1, one could take any point on the line p1--p2.\n dist = self.p1 - p0\n alpha = numpy.einsum(\"ij, ij->i\", dist, self.mirror_edge)\n # q is sits at the perpendicular intersection of the reflection\n q = dist - (alpha / self.beta)[:, None] * self.mirror_edge\n return p0 + 2 * q", "def fangle_degr(self):\r\n\r\n return self._versor_1.angle_degr(self._versor_2)", "def Fresnel(Fin, z, usepyFFTW = False):\n if z < 0:\n raise ValueError('Fresnel does not support negative z')\n if z == 0:\n Fout = Field.copy(Fin)\n return Fout #return copy to avoid hidden reference/link\n Fout = Field.shallowcopy(Fin) #no need to copy .field as it will be\n # re-created anyway inside _field_Fresnel()\n Fout.field = _field_Fresnel(z, Fout.field, Fout.dx, Fout.lam, Fout._dtype, usepyFFTW)\n Fout._IsGauss=False\n return Fout", "def rotoreflection(axis, angle, origin=(0, 0, 0)):\n rot = SymmOp.from_origin_axis_angle(origin, axis, angle)\n refl = SymmOp.reflection(axis, origin)\n m = np.dot(rot.affine_matrix, refl.affine_matrix)\n return SymmOp(m)", "def rad_field_initial_condition(self):\n\n # revert in viewing direct\n angle, _ = f.convert_direction(self.receiver_elevation, self.receiver_azimuth)\n # Looking at the sky\n if angle < 90:\n I_init = (\n self.sun_intensity\n * f.delta_func(self.sun_elevation - self.receiver_elevation)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n # Looking at the ground\n elif angle > 90:\n I_ground = RT_model_1D.calc_direct_beam_intensity(self, 0)\n\n I_lambert = (\n I_ground\n * self.ground_albedo\n * np.cos(np.deg2rad((self.sun_elevation + 180) % 360))\n )\n\n I_specular = (\n I_ground\n * self.ground_albedo\n * f.delta_func(self.sun_elevation + self.receiver_elevation - 180)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n I_init = (\n 1 - self.reflection_type\n ) * I_lambert + self.reflection_type * I_specular\n\n else:\n I_init = 
np.empty(self.stokes_dim)\n I_init.fill(np.nan)\n\n return I_init", "def getRay(self, points, normed=False): # pragma: no cover\n # to be overloaded by the child class.\n return None", "def max_front_wheel_angle():", "def cutDownAngle(state, raySortie, rayInter):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = max(raySortie, diff.norm - rayInter)\n position += diff\n return goTo(state,position)", "def slope(point_a, point_b, flip):\n\n x_a, y_a = point_a\n x_b, y_b = point_b\n\n dx = x_b - x_a\n dy = y_b - y_a\n\n return -dx / dy if flip else dy / dx", "def intersection(self, ray):\n d_proj = self._normal.dot(ray.d)\n if abs(d_proj) < bounds.too_small:\n return -1.0\n s_proj = (self._origin - ray.o).dot(self._normal)\n if d_proj * s_proj < 0.0:\n # ray going away from plane\n return -1.0\n else:\n return s_proj / d_proj", "def perspectiveNormalizationXform(self):\n return np.array([[1.0/np.tan(self.view_angle_h), 0, 0, 0],\n [0, 1.0/np.tan(self.view_angle_v), 0, 0],\n [0, 0, (self.far + self.near)/(self.far - self.near),\n 2*self.far*self.near/(self.far - self.near)],\n [0, 0, -1, 0]])", "def test_frustum_planes_ring_cam() -> None:\n near_clip_dist = 6.89 # arbitrary value\n\n # Set \"focal_length_x_px_\"\n fx_px = 1402.4993697398709\n\n # Set \"focal_length_y_px_\"\n fy_px = 1405.1207294310225\n\n # Set \"focal_center_x_px_\"\n cx_px = 957.8471720086527\n\n # Set \"focal_center_y_px_\"\n cy_px = 600.442948946496\n\n camera_name = \"ring_front_right\"\n height_px = 1550\n width_px = 2048\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx_px,\n fy_px=fy_px,\n cx_px=cx_px,\n cy_px=cy_px,\n height_px=height_px,\n width_px=width_px,\n cam_name=camera_name,\n )\n (\n left_plane,\n right_plane,\n near_plane,\n bottom_plane,\n top_plane,\n ) = pinhole_camera.frustum_planes(near_clip_dist)\n\n left_plane_expected: NDArrayFloat = np.array([fx_px, 0.0, width_px / 2.0, 0.0])\n right_plane_expected: NDArrayFloat = np.array([-fx_px, 0.0, width_px / 2.0, 0.0])\n near_plane_expected: NDArrayFloat = np.array([0.0, 0.0, 1.0, -near_clip_dist])\n bottom_plane_expected: NDArrayFloat = np.array([0.0, -fx_px, height_px / 2.0, 0.0])\n top_plane_expected: NDArrayFloat = np.array([0.0, fx_px, height_px / 2.0, 0.0])\n\n assert np.allclose(\n left_plane, left_plane_expected / np.linalg.norm(left_plane_expected)\n )\n assert np.allclose(\n right_plane, right_plane_expected / np.linalg.norm(right_plane_expected)\n )\n assert np.allclose(\n bottom_plane, bottom_plane_expected / np.linalg.norm(bottom_plane_expected)\n )\n assert np.allclose(\n top_plane, top_plane_expected / np.linalg.norm(top_plane_expected)\n )\n assert np.allclose(near_plane, near_plane_expected)", "def rayleigh(v0):\r\n # Need to sample the angle theta from the phase function\r\n loop_condition = True\r\n while loop_condition:\r\n eps = random.random()*np.pi # Sampled x coordinate from 0 to pi\r\n eta = random.random()*(3/4)*2 # Sampled y coordinate from 0 to max of Rayleigh phase function for unpolarised light\r\n if eta < 3/4*(1 + (np.cos(eps))**2): # Checks if eta is less than the Rayleigh phase function using the angle eps\r\n loop_condition = False\r\n \r\n # Get a new direction vector for the photon\r\n v = scattering_direction(v0, eps)\r\n return v", "def normal_filter(pointcloud, target_orient, threshold):\n\n xyz_points = np.asarray(pointcloud.points)\n xyz_normals = np.asarray(pointcloud.normals)\n projection = xyz_normals.dot(target_orient)\n projection_mask = projection > threshold #\n target_points = 
xyz_points[projection_mask]\n other_points = xyz_points[~projection_mask]\n target_normals = xyz_normals[projection_mask]\n other_normals = xyz_normals[~projection_mask]\n\n target_points_ = o3d.geometry.PointCloud()\n other_points_ = o3d.geometry.PointCloud()\n target_points_.points = o3d.utility.Vector3dVector(target_points)\n target_points_.normals = o3d.utility.Vector3dVector(target_normals)\n other_points_.points = o3d.utility.Vector3dVector(other_points)\n other_points_.normals = o3d.utility.Vector3dVector(other_normals)\n\n return target_points_, other_points_", "def reflection(normal, origin=(0, 0, 0)):\n # Normalize the normal vector first.\n n = np.array(normal, dtype=float) / np.linalg.norm(normal)\n\n u, v, w = n\n\n translation = np.eye(4)\n translation[0:3, 3] = -np.array(origin)\n\n xx = 1 - 2 * u ** 2\n yy = 1 - 2 * v ** 2\n zz = 1 - 2 * w ** 2\n xy = -2 * u * v\n xz = -2 * u * w\n yz = -2 * v * w\n mirror_mat = [[xx, xy, xz, 0], [xy, yy, yz, 0], [xz, yz, zz, 0],\n [0, 0, 0, 1]]\n\n if np.linalg.norm(origin) > 1e-6:\n mirror_mat = np.dot(np.linalg.inv(translation),\n np.dot(mirror_mat, translation))\n return SymmOp(mirror_mat)", "def singleOptic2(n,misalign=np.zeros(6),srcdist=89.61e3+1.5e3,az=100.,\\\n returnRays=False,f=None,\\\n plist=[[0],[0],[0]],\\\n ax=100.):\n #Establish subannulus of rays\n r0 = conic.primrad(8426.,220.,8400.)\n r1 = conic.primrad(8426.+ax,220.,8400.)\n rays = sources.subannulus(r0,r1,az/220.,n,zhat=-1.)\n #Transform to node position\n tran.transform(rays,220,0,0,0,0,0)\n #Set up finite source distance\n raydist = sqrt(srcdist**2+rays[1]**2+rays[2]**2)\n l = rays[1]/raydist\n m = rays[2]/raydist\n n = -sqrt(1.-l**2-m**2)\n rays = [raydist,rays[1],rays[2],rays[3],l,m,n,rays[7],rays[8],rays[9]]\n #Align perfectly to beam\n tran.steerX(rays)\n #Apply misalignment\n tran.transform(rays,*misalign)\n #Place mirror\n tran.transform(rays,-220.,0,-8400.,0,0,0)\n## surf.wolterprimarynode(rays,220,8400.)\n surf.primaryLL(rays,220.,8400.,8426.+ax,8426.,az/220.,*plist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8400.+ax,\\\n rays[3]>8400.))\n tran.itransform(rays,-220.,0.,-8400.,0,0,0)\n #Vignette rays not landing in active mirror area\n ind = np.logical_and(rays[3]>26.,rays[3]<(26.+ax))\n## ind = np.logical_and(np.abs(rays[2])<az/2.,indz)\n rays = tran.vignette(rays,ind=ind)\n #Reverse misalignment\n tran.itransform(rays,*misalign)\n #Reflect and go to surface\n tran.reflect(rays)\n if f is None:\n f = surf.focusI(rays)\n else:\n tran.transform(rays,0,0,f,0,0,0)\n surf.flat(rays)\n #Get centroid\n cx,cy = anal.centroid(rays)\n\n if returnRays is True:\n return rays\n \n return anal.hpd(rays)/abs(f)*180/pi*60**2,f,cx", "def single_ray_depth_estimate(\n ray_voxel_indices,\n ray_to_occupancy_accumulated_pon,\n ray_to_occupancy_pon,\n s\n):\n # Create an index that when passed to a numpy array will return the voxels\n # that this ray passes through\n if ray_voxel_indices.shape[-1] == 3:\n indices = (\n ray_voxel_indices[:, 0],\n ray_voxel_indices[:, 1],\n ray_voxel_indices[:, 2]\n )\n else:\n indices = (\n ray_voxel_indices[:, 0],\n ray_voxel_indices[:, 1]\n )\n\n # Compute the log of the occupancy_to_ray message for the positive case\n # NOTE: The ray_to_occupancy_accumulated is in log space\n occupancy_to_ray_pon = (\n ray_to_occupancy_accumulated_pon[indices] -\n ray_to_occupancy_pon\n )\n\n # We assume that incoming messages are normalized to 1, thus we need to\n # normalize the occupancy-to-ray message\n max_occupancy_to_ray = np.maximum(0, 
occupancy_to_ray_pon)\n t1 = np.exp(0.0 - max_occupancy_to_ray)\n t2 = np.exp(occupancy_to_ray_pon - max_occupancy_to_ray)\n\n # Now we normalize the occupancy to ray message for the positive case.\n # NOTE: We only normalize and store the occupancy-to-ray message for the\n # positive case\n # The occupancy_to_ray holds the positive occupancy-to-ray messages for the\n # current ray (not in logspace) from Equation (44) in my report\n occupancy_to_ray = np.clip(\n t2 / (t2 + t1),\n 1e-4,\n 1-1e-4\n )\n\n # Compute the cumulative products in linear time (see eq. 13, 14 Ulusoy\n # 3DV)\n # For the computation of the cumulative product we need\n # the occupancy-to-ray messages for the negative case.\n # We append 1 at the top because for the o_1 voxel this term is equal to 1\n occupancy_to_ray_neg_cumprod = np.hstack([\n [1.], (1 - occupancy_to_ray).cumprod()\n ])\n\n P = occupancy_to_ray * occupancy_to_ray_neg_cumprod[:-1] * s\n\n return P / P.sum()", "def get_normalized_direction(self, direction):\n return round(self.normal_joystick_slope * direction + self.normal_joystick_intercept, 2)", "def pseudo_flatfield(img_plane, sigma=5):\n filtered_img = gaussian_filter(img_plane, sigma)\n return img_plane / (filtered_img + 1)", "def get_reflect(image, side):\n if side not in [\"r\", \"l\", \"t\", \"b\", \"rt\", \"rb\", \"lt\", \"lb\"]:\n return 1, None\n try:\n if side == \"r\":\n result = np.zeros((image.shape[0], image.shape[1] * 2 - 1))\n result[:, : image.shape[1]] = image\n result[:, -image.shape[1] :] = image[:, ::-1]\n elif side == \"l\":\n result = np.zeros((image.shape[0], image.shape[1] * 2 - 1))\n result[:, : image.shape[1]] = image[:, ::-1]\n result[:, -image.shape[1] :] = image\n elif side == \"b\":\n result = np.zeros((image.shape[0] * 2 - 1, image.shape[1]))\n result[: image.shape[0], :] = image\n result[-image.shape[0] :, :] = image[::-1]\n elif side == \"t\":\n result = np.zeros((image.shape[0] * 2 - 1, image.shape[1]))\n result[: image.shape[0], :] = image[::-1]\n result[-image.shape[0] :, :] = image\n\n elif side == \"rb\":\n result = np.zeros((image.shape[0] * 2 - 1, image.shape[1] * 2 - 1))\n result[: image.shape[0], : image.shape[1]] = image\n result[: image.shape[0], -image.shape[1] :] = image[:, ::-1]\n result[-image.shape[0] :, : image.shape[1]] = image[::-1, :]\n result[-image.shape[0] :, -image.shape[1] :] = image[::-1, ::-1]\n\n elif side == \"rt\":\n result = np.zeros((image.shape[0] * 2 - 1, image.shape[1] * 2 - 1))\n result[: image.shape[0], : image.shape[1]] = image[::-1, :]\n result[: image.shape[0], -image.shape[1] :] = image[::-1, ::-1]\n result[-image.shape[0] :, : image.shape[1]] = image\n result[-image.shape[0] :, -image.shape[1] :] = image[:, ::-1]\n\n elif side == \"lt\":\n result = np.zeros((image.shape[0] * 2 - 1, image.shape[1] * 2 - 1))\n result[: image.shape[0], : image.shape[1]] = image[::-1, ::-1]\n result[: image.shape[0], -image.shape[1] :] = image[::-1, :]\n result[-image.shape[0] :, : image.shape[1]] = image[:, ::-1]\n result[-image.shape[0] :, -image.shape[1] :] = image\n\n elif side == \"lb\":\n result = np.zeros((image.shape[0] * 2 - 1, image.shape[1] * 2 - 1))\n result[: image.shape[0], : image.shape[1]] = image[:, ::-1]\n result[: image.shape[0], -image.shape[1] :] = image\n result[-image.shape[0] :, : image.shape[1]] = image[::-1, ::-1]\n result[-image.shape[0] :, -image.shape[1] :] = image[::-1, :]\n except:\n return 2, None\n\n return 0, result", "def tilt(poly):\n num = len(poly) - 1\n vec = unit_normal(poly[0], poly[1], poly[num])\n 
vec_alt = np.array([vec[0], vec[1], vec[2]])\n vec_z = np.array([0, 0, 1])\n # return (90 - angle2vecs(vec_alt, vec_z)) # update by Santosh\n return angle2vecs(vec_alt, vec_z)", "def normal(self) -> Vec:\n # The three points are in clockwise order, so compute differences\n # in the clockwise direction, then cross to get the normal.\n point_1 = self.planes[1] - self.planes[0]\n point_2 = self.planes[2] - self.planes[1]\n\n return Vec.cross(point_1, point_2).norm()", "def half_hermitian_to_real_inverse_fft_image_filter(*args, **kwargs):\n import itk\n instance = itk.HalfHermitianToRealInverseFFTImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def _reflect(self, direction: Point, trajectory: TrajectoryBase):\n self.ball.unit_velocity *= direction\n return self._finish_step_ball(trajectory)", "def rayleigh(th,r,wl,a,n1,n2):\n k = 2*np.pi/wl\n n_2 = n2**2/n1**2\n return ((k**2)*(a**3)*((n_2-1)/(n_2+2))/r)*np.array([[np.cos(th), 0],[0,1]])", "def calculate_plane_normal(patches):\n normals = []\n for patch in patches:\n normal = get_normal(patch)\n normals.append(normal)\n # Taken naive mean of normals\n # TODO outlier removal\n normals = np.mean(np.array(normals), axis=0)\n return normals", "def directionalLight(*args, decayRate: int=0, discRadius: Union[float, bool]=0.0, exclusive:\n bool=True, intensity: Union[float, bool]=0.0, name: Union[AnyStr, bool]=\"\",\n position: Union[List[float, float, float], bool]=None, rgb:\n Union[List[float, float, float], bool]=None, rotation: Union[List[float,\n float, float], bool]=None, shadowColor: Union[List[float, float, float],\n bool]=None, shadowDither: Union[float, bool]=0.0, shadowSamples: Union[int,\n bool]=0, softShadow: bool=True, useRayTraceShadows: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[double], Any]:\n pass", "def rsdl(self):\n\n if self.opt['Monotone'] and self.k > 0:\n return np.linalg.norm((self.X - self.Y).ravel())\n return np.linalg.norm((self.X - self.Yprv).ravel())", "def set_reflectivity(self, f_reflec=0, f_refl=0):\n self.F_REFLEC = f_reflec\n self.F_REFL = f_refl", "def obj_ray_cast(obj, matrix):\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None", "def FaceNormals(self):\n\n self.__do_memebers_exist__()\n\n points = np.copy(self.points)\n if points.shape[1] < 3:\n dum = np.zeros((points.shape[0],3))\n dum[:,:points.shape[1]] = points\n points = dum\n\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n faces = self.faces\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n faces = self.elements\n else:\n raise ValueError(\"Cannot compute face normals on {}\".format(self.element_type))\n\n\n face_coords = self.points[faces[:,:3],:]\n\n p1p0 = face_coords[:,1,:] - face_coords[:,0,:]\n p2p0 = face_coords[:,2,:] - face_coords[:,0,:]\n\n normals = np.cross(p1p0,p2p0)\n norm_normals = np.linalg.norm(normals,axis=1)\n normals[:,0] /= norm_normals\n normals[:,1] /= norm_normals\n normals[:,2] /= norm_normals\n\n # CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n 
self.GetElementsWithBoundaryFaces()\n meds = self.Medians()\n face_element_meds = meds[self.boundary_face_to_element[:,0],:]\n p1pm = face_coords[:,1,:] - face_element_meds\n # IF THE DOT PROUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP\n _check = np.einsum(\"ij,ij->i\",normals,p1pm)\n normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]\n\n return normals", "def illuminate(self, ray, hit, scene):\n # TODO A5 copy implementation from A4 and modify\n # material parameters need to be looked up by the uv's at the intersection point\n l = self.position - hit.point\n epsilon = 0.000001\n point = hit.point + l*epsilon\n shadow_ray = Ray(point, l, epsilon, 1)\n\n if (scene.intersect(shadow_ray).t > 1):\n\n # diffuse shading\n intensity = self.intensity\n position = self.position\n normal = hit.normal\n dist_to_source = np.linalg.norm(hit.point - position)\n diffuse_coeff = hit.material.lookup(hit.material.k_d, hit)\n v = (-1) * normalize(ray.direction)\n light_ray = normalize(position - hit.point)\n specular_coeff = hit.material.lookup(hit.material.k_s, hit)\n p = hit.material.lookup(hit.material.p, hit)\n\n # diffuse shading\n # diffuse_output = diffuse_coeff * (np.maximum(0, np.dot(normal, light_ray)) / (dist_to_source ** 2)) * intensity\n # specular shading\n shade_ray = Ray(hit.point, light_ray, epsilon)\n if (scene.intersect(shade_ray).t == np.inf):\n h = (v + light_ray) / np.linalg.norm(v + light_ray)\n specular_output = (diffuse_coeff + specular_coeff * ((np.dot(normal, h)) ** p)) * (\n np.maximum(0, np.dot(normal, light_ray)) / (dist_to_source ** 2)) * intensity\n return specular_output\n\n return vec([0, 0, 0])", "def calc_rfl(self, x_surface, geom):\n\n return self.rfl", "def calcLorentzGammaFromVelocity(self,direction):\n if direction not in self.v.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 /(1 - (getattr(self.v,direction)/speed_light)**2))", "def normal(self) -> Vec:\n return abs(self.up_axis.cross(self.forward()))", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def _reflect_points(points, p1=(0, 0), p2=(1, 0)):\n # From http://math.stackexchange.com/questions/11515/point-reflection-across-a-line\n points = np.array(points)\n p1 = np.array(p1)\n p2 = np.array(p2)\n if np.asarray(points).ndim == 1:\n return (\n 2 * (p1 + (p2 - p1) * np.dot((p2 - p1), (points - p1)) / norm(p2 - p1) ** 2)\n - points\n )\n if np.asarray(points).ndim == 2:\n return np.array(\n [\n 2 * (p1 + (p2 - p1) * np.dot((p2 - p1), (p - p1)) / norm(p2 - p1) ** 2)\n - p\n for p in points\n ]\n )", "def mirrorPair(N,srcdist=89.61e3+1.5e3,primalign=np.zeros(6),\\\n secalign=np.zeros(6),rrays=False,f=None,\\\n plist=[[0],[0],[0]],hlist=[[0],[0],[0]]):\n #Establish subannulus of rays\n rays = sources.subannulus(220.,221.,100./220.,N,zhat=-1.)\n #Transform to node position\n tran.transform(rays,220,0,0,0,0,0)\n #Set up finite source distance\n raydist = 
sqrt(srcdist**2+rays[1]**2+rays[2]**2)\n rays[4] = rays[1]/raydist\n rays[5] = rays[2]/raydist\n rays[6] = -sqrt(1.-rays[4]**2-rays[5]**2)\n\n #Place mirror pair\n coords = [tran.tr.identity_matrix()]*4\n tran.transform(rays,-220+conic.primrad(8450.,220.,8400.),0,50.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*primalign,coords=coords)\n tran.transform(rays,-conic.primrad(8450.,220.,8400.),0,-8450.,0,0,0,\\\n coords=coords)\n## surf.wolterprimary(rays,220.,8400.)\n surf.primaryLL(rays,220.,8400.,8500.,8400.,100./220,*plist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8500.,\\\n rays[3]>8400.))\n tran.reflect(rays)\n #Place secondary in primary's reference frame\n tran.transform(rays,conic.secrad(8350.,220.,8400.),0,8350.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*secalign,coords=coords)\n tran.itransform(rays,conic.secrad(8350.,220.,8400.),0,8350.,0,0,0,\\\n coords=coords)\n## surf.woltersecondary(rays,220.,8400.)\n surf.secondaryLL(rays,220.,8400.,1.,8400.,8300.,100./220,*hlist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8400.,\\\n rays[3]>8300.))\n tran.reflect(rays)\n\n #Go back to nominal node reference frame and down to focus\n rays = tran.applyT(rays,coords,inverse=True)\n\n if f is None:\n f = -surf.focusI(rays)\n print f\n else:\n tran.transform(rays,0,0,-f,0,0,0)\n surf.flat(rays)\n\n if rrays is True:\n return rays\n \n return anal.hpd(rays)/f * 180/np.pi * 60.**2, \\\n airnp.mean(rays[1]), np.mean(rays[2])", "def process_downthrow_direction(self,fault_properties,fault_orientations): \n for fname in fault_properties.index:\n if fault_properties.loc[fname,'downthrow_dir'] == 1.0:\n logger.info(\"Estimating downthrow direction using fault intersections\")\n # fault_intersection_angles[f]\n if np.abs(fault_properties.loc[fname,'downthrow_dir'] - fault_properties.loc[fname,'dip_dir']) > 90:\n fault_orientations.loc[fault_orientations['fault_name'] == fname, 'DipDirection'] -= 180#displacements_numpy[\n fault_properties.loc[fname,'dip_dir']-=180", "def surface_norm(self, pt):\n\n return (pt - self.origin).normalize()", "def _reflect_points(points, p1 = (0,0), p2 = (1,0)):\n # From http://math.stackexchange.com/questions/11515/point-reflection-across-a-line\n points = np.array(points); p1 = np.array(p1); p2 = np.array(p2)\n if np.asarray(points).ndim == 1:\n return 2*(p1 + (p2-p1)*np.dot((p2-p1),(points-p1))/norm(p2-p1)**2) - points\n if np.asarray(points).ndim == 2:\n return np.array([2*(p1 + (p2-p1)*np.dot((p2-p1),(p-p1))/norm(p2-p1)**2) - p for p in points])", "def rayonDeCourbur(**kwargs):\n a = 0\n b = 0\n try:\n if kwargs[\"ref\"] == \"local\":\n a = 6378249.145\n b = 6356515\n elif kwargs[\"ref\"] == \"global\":\n a = 6378137\n b = 6356752.314\n elif kwargs[\"a\"] and kwargs[\"b\"]:\n a = kwargs[\"a\"]\n b = kwargs[\"b\"]\n except KeyError:\n return {\"erreur\": \"params a and b is required, you can use ref too which has two possible value: local and global\"}\n if a != 0 and b != 0:\n try:\n phi = math.radians(kwargs[\"phi\"])\n e2 = 1-(b/a)**2\n w = math.sqrt(1-e2*math.sin(phi)**2)\n M = a*(1-e2)/w**3\n N = a/w\n if \"alpha\" in kwargs.keys():\n alpha = math.radians(kwargs[\"alpha\"])\n rAlpha = (M*N)/(M*math.sin(alpha)**2+N*math.cos(alpha)**2)\n return{\"M\": M, \"N\": N, \"rAlpha\": rAlpha, \"1/R\": 1/rAlpha}\n elif kwargs[\"radius\"] == \"M\":\n return {\"M\": M}\n elif kwargs[\"radius\"] == \"N\":\n return {\"N\": N}\n except KeyError as err:\n return {\"erreur\": f\"{format(err)} is required!\"}", "def direction(self):\n 
norm=math.sqrt(self.x**2 + self.y**2 + self.z**2)\n return Vector3(self.x/norm, self.y/norm, self.z/norm)", "def getFar(self):\n return self.light.node().getLens().getFar()", "def rayleigh(th,r,wl,a,n1,n2):\n c = np.cos(th)\n c2,s2 = c**2, np.sin(th)**2\n k = 2*np.pi/wl\n n_2 = n2**2/n1**2\n m = (k**4)*(a**6)*(abs(n_2-1)**2) / ((abs(n_2+2)**2) * 2 * (r**2))\n return m*np.array([[1+c2 , -s2 , 0 , 0],\n [-s2 , 1+c2 , 0 , 0],\n [0 , 0 , 2*c , 0],\n [0 , 0 , 0 , 2*c]])", "def setNormalForce(self):\n if self.onGround:\n self.fn = -self.fg\n self.vy = 0\n if self.inGround:\n self.fn = -self.fg\n self.vy = -5\n if self.hittingCeilling:\n self.fn = 1\n self.vy = 2\n if self.hittingWallRight:\n self.fn = -1\n self.vx = -.1-abs(self.vx)\n if self.hittingWallLeft:\n self.fn = 1\n self.vx = .1+1*abs(self.vx)\n elif (not self.onGround) and (not self.hittingCeilling) and (not self.hittingWallLeft) and (not self.hittingWallRight):\n self.fn = 0", "def _wf(self, p):\n r = self.faces - p\n n = norm(r, axis=2)\n num = row_wise_dot(r[:, 0, :], np.cross(r[:, 1, :], r[:, 2, :]))\n den = n[:, 1] * n[:, 2] * n[:, 0]\n for i in range(3):\n j = (i + 1) % 3\n k = (i + 2) % 3\n den += row_wise_dot(r[:, i, :], r[:, j, :]) * n[:, k]\n return 2*np.arctan2(num, den)", "def reflect(self):\n self.vertices[-1, :] = self.reflected", "def get_face_normal(self):\n if self.mesh is None:\n self.load_mesh()\n self.mesh.set_face_normal()", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv @ ray_origin\n ray_target_obj = matrix_inv @ ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def normalize_face_landmarks(face_landmarks):\r\n\tface_landmarks_norm = np.zeros(face_landmarks.shape)\r\n\t\r\n\tfor (i, lm) in enumerate(face_landmarks):\r\n\t\tface_landmarks_norm[i] = lm - lm[nose_center_idx]\r\n\t\t\t\r\n\tstd_x = np.std(face_landmarks_norm[:,:,0].reshape((-1,)))\r\n\tstd_y = np.std(face_landmarks_norm[:,:,1].reshape((-1,)))\r\n\t\r\n\tface_landmarks_norm[:,:,0] = np.multiply(face_landmarks_norm[:,:,0], 1./std_x)\r\n\tface_landmarks_norm[:,:,1] = np.multiply(face_landmarks_norm[:,:,1], 1./std_y)\r\n\t\r\n\treturn face_landmarks_norm", "def test_compute_pixel_ray_directions_vectorized() -> None:\n fx = 10\n fy = 10\n\n # dummy 2d coordinates in the image plane.\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n\n # principal point is at (10,5)\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n gt_ray_dir: NDArrayFloat = np.array([2, -3, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n for i in range(4):\n assert np.allclose(gt_ray_dir, ray_dirs[i])", "def flower(t,r,n,angle):\n for i in range(n):\n petal(t,r,angle)\n lt(t,360/n)", "def getDirection(self):\n return self.ray.direction", "def update_normal(self):\n options = self.get_direction_options()\n if self.is_at_intersection() or self.last_position == (self.rect.centerx, self.rect.centery):\n self.direction = self.get_chase_direction(options)\n if self.direction == 'u' and 'u' in options:\n self.rect.centery -= self.speed\n elif 
self.direction == 'l' and 'l' in options:\n self.rect.centerx -= self.speed\n elif self.direction == 'd' and 'd' in options:\n self.rect.centery += self.speed\n elif self.direction == 'r' and 'r' in options:\n self.rect.centerx += self.speed\n self.change_eyes(self.direction or 'r') # default look direction to right\n self.image = self.norm_images.next_image()", "def cutDownAngle_gk(state, raySortie):\n position = state.my_goal\n diff = state.ball_pos - position\n diff.norm = raySortie\n position += diff\n return goTo(state,position)", "def surface_norm(self, pt):\n\n return self.normal.normalize()", "def test_compute_pixel_ray_directions_vectorized_invalid_focal_lengths() -> None:\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n fx = 10\n fy = 11\n\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n\n with pytest.raises(ValueError):\n pinhole_camera.compute_pixel_ray_directions(uv)" ]
[ "0.6952331", "0.59180486", "0.58824384", "0.57535976", "0.57386696", "0.55146694", "0.5512906", "0.5464423", "0.5416558", "0.5369161", "0.5335698", "0.52933735", "0.52365685", "0.5117482", "0.50288486", "0.49778453", "0.49726534", "0.49646103", "0.49436632", "0.49390393", "0.49358514", "0.49279857", "0.49221197", "0.49185342", "0.48926383", "0.48917755", "0.48183814", "0.48181015", "0.47715023", "0.4744581", "0.47405708", "0.4739081", "0.47374448", "0.4731823", "0.47298816", "0.47206044", "0.47196254", "0.47159818", "0.46972668", "0.46687692", "0.4657364", "0.46520603", "0.4648283", "0.46470597", "0.463102", "0.4630708", "0.46299496", "0.46229905", "0.46206763", "0.4620509", "0.4614964", "0.46145576", "0.4610828", "0.4609762", "0.46077985", "0.46066543", "0.45913717", "0.45899048", "0.45878804", "0.458732", "0.45775932", "0.45692024", "0.45644066", "0.45632887", "0.45619342", "0.45579934", "0.45530155", "0.45443854", "0.45437765", "0.45431706", "0.45422828", "0.45350268", "0.4526983", "0.45251003", "0.45215443", "0.44989517", "0.44978988", "0.449772", "0.44973174", "0.44936666", "0.44929302", "0.44912007", "0.44899514", "0.44876435", "0.44832283", "0.44822136", "0.44786412", "0.4475178", "0.447507", "0.4472555", "0.44713682", "0.44709554", "0.4468598", "0.44640005", "0.44554716", "0.44540277", "0.44490364", "0.4446113", "0.44439602", "0.44349873" ]
0.8033125
0
Tests the API endpoint to get hashrate resale details with missing field
def test_mining_hashrate_resale_details_with_missing_field(params): client = Client(key, secret) client.mining_hashrate_resale_details.when.called_with(**params).should.throw( ParameterRequiredError )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mining_hashrate_resale_details():\n\n client = Client(key, secret)\n response = client.mining_hashrate_resale_details(123, \"user_name\")\n response.should.equal(mock_item)", "def test_retire_rate_plan(self):\n pass", "def test_validation_get_valid_resampling(self):\n self.assertIsInstance(api.validation.fetch_resampling(), dict)", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def test_get_details7(self):\n pass", "def test_get_rate_article(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_2)\n self.rate_details[\"user\"]['rate'] = 4\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n response = self.client.get(\n self.view_rates_url + str(1) + \"/\",\n format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_details(self):\n restaurant_id = 23917\n with self.app.app_context():\n details = ordrin.get_details(restaurant_id)\n\n self.assertEquals(details['name'], 'Test Merchant 20130315',\n 'Check restaurant name on test details.')\n self.assertEquals(details['id'], restaurant_id,\n 'Check restaurant id on test details.')\n self.assertTrue(details['delivers'], 'Check delivery flag on test entry.')\n self.assertTrue(details['allows_asap'],\n 'Check asap flag on test details.')\n self.assertAlmostEqual(details['location'][0], 42.825685,\n 'Check latitude on test details.')\n self.assertAlmostEqual(details['location'][1], -73.879458,\n 'Check longitude on test details.')\n self.assertEquals(details['partner'], 'delivery.com',\n 'Check delivery partner on test details.')\n self.assertEquals(details['address'], '123 FAKE ST',\n 'Check address on test details.')\n self.assertTrue(False)", "def test_mocked_get_api(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/154/\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response.content)\n response2 = c.get(\"/apimock/mocked/api/account/187/\")\n self.assertEqual(response2.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response2.content)", "def test_retrieve_list_resgate_to_user_authenticated(self):\n sample_resgate(user=self.user, value=500)\n sample_resgate(user=self.user, value=200)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.all().order_by('quantity')\n serializer = ResgateSerializer(resgates, many=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, serializer.data)", "def test_get_pay_in_details(self):\n pass", "async def test_get_rates_get(client):\n params = [('exchangeType', 'exchange_type_example')]\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='GET',\n path='/public/exchange/1/getRates',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_get_dealer_ratings(self):\n pass", "def 
test_get_restaurant_review_list_fail(self):\n client = Client()\n res_id = Restaurant.objects.get(name='TEST_REST').id\n response = client.get('/api/restaurant/'+str(res_id)+'/')\n self.assertEqual(response.status_code, 401)", "def test_get_metadata_for_rate_plan(self):\n pass", "def test_lti20_get_no_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\"})", "def test_view_reteta_detail(self):\n reteta = sample_reteta(user=self.user)\n reteta.tags.add(sample_tag(user=self.user))\n reteta.ingredients.add(sample_ingredient(user=self.user))\n\n url = detail_url(reteta.id)\n res = self.client.get(url)\n serializer = RetetaDetailSerializer(reteta)\n self.assertEqual(res.data, serializer.data)", "def test_get_all_rate_plans(self):\n pass", "def test_get_object_dict(self):\n review = self.review[0].get_dict()\n self.assertIsNotNone(review['reviewer_id'])\n self.assertIsNotNone(review['book_id'])\n self.assertEqual(5, review['rate'])", "def test_get_risk_profile_using_get(self):\n pass", "def test_get_rate_plan_by_product(self):\n pass", "def test_company_EU_GR_vies_zero(self, mock_check):\n mock_check.return_value = {\"valid\": True}\n self.assertEqual(self.policy.get_tax_rate(\"EL090145420\", \"GR\"), (None, True))", "def test_get_rating(self):\n url = reverse('rate-game')\n data = {'igdb': self.game.igdb}\n response = self.client.get(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_limited_to_user_who_made_resgate(self):\n user2 = get_user_model().objects.create_user(\n 'test2@email.com',\n 'test12345'\n )\n sample_resgate(user=user2)\n sample_resgate(user=self.user)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.filter(user=self.user)\n serializer = ResgateSerializer(resgates, many=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data, serializer.data)", "def test_household_get(self):\n url = '/household/'+ self.test_id + '/'\n response = self.tester.get(url,\n content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def test_update_rate_plan(self):\n pass", "def test_get(self):\n #Validate the response\n resp = self.client.get('/api/v1/purchase-order/1/')\n self.assertEqual(resp.status_code, 200)\n \n #Validate the returned data\n obj = resp.data\n self.assertEqual(obj['id'], 1)\n self.assertEqual(obj['terms'], '0/net')\n self.assertEqual(obj['revision'], 0)\n \n #Test items\n self.assertIn('items', obj)\n self.assertEqual(len(obj['items']), 1)\n item1 = obj['items'][0]\n #self.assertIn('purchasing_units', item1)\n #self.assertEqual(item1['purchasing_units'], 'm')", "def test_rate_limited(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPConflict)\n self._check_response(response, 105)", "def test_get_pricing_with_incorrect_instrument():\n res = oanda.get_pricing(CONFIG, 'XXX500_WRONG')\n assert res[0] == 400", "def test_get_risk_profile_all_using_get(self):\n pass", "def test_gelir_api(self):\n response = self.client.get(reverse('gelir-json', 
args=[self.sample_type]))\n self.assertContains(response, self.proband.gel_id)\n self.assertEquals(response.status_code, 200)", "def test_detail(request, pk, format=None):\n try:\n snippet = Base_price.objects.get(pk=pk)\n except Base_price.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = Base_priceSerializer(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = Base_priceSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def test_get_transaction_details_request(self):\n self.trans_details.get_transaction_details(\n trans_id = 123456,\n )", "def test_companies_company_id_data_tax_rates_get(self):\n pass", "def test_yearn_api(database, ethereum_inquirer):\n # mock coingecko response\n original_request = requests.get\n\n def mock_yearn_api(url, timeout):\n \"\"\"Return only two yearn vaults for the API response\"\"\"\n if YEARN_OLD_API in url:\n return MockResponse(HTTPStatus.OK, \"\"\"[{\"inception\":14891068,\"address\":\"0x341bb10D8f5947f3066502DC8125d9b8949FD3D6\",\"symbol\":\"yvCurve-STG-USDC\",\"name\":\"yvCurve-STG-USDC 0.4.3\",\"display_name\":\"STGUSDC-f\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0x341bb10D8f5947f3066502DC8125d9b8949FD3D6/logo-128.png\",\"token\":{\"name\":\"Curve.fi Factory Crypto Pool: STG/USDC\",\"symbol\":\"STGUSDC-f\",\"address\":\"0xdf55670e27bE5cDE7228dD0A6849181891c9ebA1\",\"decimals\":18,\"display_name\":\"STGUSDC-f\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0xdf55670e27bE5cDE7228dD0A6849181891c9ebA1/logo-128.png\"},\"tvl\":{\"total_assets\":1066762132988328431382564,\"price\":1.613069263536325,\"tvl\":1720761.2082279222},\"apy\":{\"type\":\"convex\",\"gross_apr\":0.14584764353034685,\"net_apy\":0.09226416095055612,\"fees\":{\"performance\":0.2,\"withdrawal\":null,\"management\":0.02,\"keep_crv\":null,\"cvx_keep_crv\":0.1},\"points\":null,\"blocks\":null,\"composite\":null,\"error_reason\":null,\"staking_rewards_apr\":0},\"strategies\":[{\"address\":\"0x916011bD2d333fBA14dBB8bf0BdF01e3384FD2e6\",\"name\":\"StrategyConvexSTGUSDC\"}],\"endorsed\":true,\"version\":\"0.4.3\",\"decimals\":18,\"type\":\"v2\",\"emergency_shutdown\":false,\"updated\":1687812577,\"migration\":{\"available\":false,\"address\":\"0x341bb10D8f5947f3066502DC8125d9b8949FD3D6\"}},{\"inception\":14980240,\"address\":\"0x3B27F92C0e212C671EA351827EDF93DB27cc0c65\",\"symbol\":\"yvUSDT\",\"name\":\"yvUSDT 0.4.3\",\"display_name\":\"USDT\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0x3B27F92C0e212C671EA351827EDF93DB27cc0c65/logo-128.png\",\"token\":{\"name\":\"Tether 
USD\",\"symbol\":\"USDT\",\"address\":\"0xdAC17F958D2ee523a2206206994597C13D831ec7\",\"decimals\":6,\"display_name\":\"USDT\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0xdAC17F958D2ee523a2206206994597C13D831ec7/logo-128.png\"},\"tvl\":{\"total_assets\":14938928651062,\"price\":1.0000823,\"tvl\":14940158.124889985},\"apy\":{\"type\":\"v2:averaged\",\"gross_apr\":0.023362870862237983,\"net_apy\":0.018862632100866916,\"fees\":{\"performance\":0.2,\"withdrawal\":null,\"management\":0.0,\"keep_crv\":null,\"cvx_keep_crv\":null},\"points\":{\"week_ago\":0.013129557974331796,\"month_ago\":0.018862632100866916,\"inception\":0.022614793789739185},\"blocks\":{\"now\":17565983,\"week_ago\":17516180,\"month_ago\":17345663,\"inception\":15243268},\"composite\":null,\"error_reason\":null,\"staking_rewards_apr\":0},\"strategies\":[{\"address\":\"0x016919386387898E4Fa87c7c4D3324F75f178F12\",\"name\":\"0x01691938\"},{\"address\":\"0x087794F304aEB337388a40e7c382A0fEa78c47fC\",\"name\":\"Strategy_ProviderOfUSDTToNoHedgeUniV3StablesJoint(USDC-USDT)\"},{\"address\":\"0xBc04eFD0D18685BA97cFAdE4e2D3171701B4099c\",\"name\":\"StrategyLenderYieldOptimiser\"},{\"address\":\"0xE7A8Cbc43a0506d3A328393C1C30548835256d7D\",\"name\":\"Stargate-v2-USDT\"},{\"address\":\"0xde6F5b2452F94337a428c86b5D2F143383b4D573\",\"name\":\"Strategy_ProviderOfUSDTToNoHedgeBalancerTripod(bb-a-USD)\"},{\"address\":\"0x8829f62FCe1DFBfA3EB60eBE95133D5F43b9BD04\",\"name\":\"EmptyStrat\"},{\"address\":\"0xd8F414beB0aEb5784c5e5eBe32ca9fC182682Ff8\",\"name\":\"StrategyLenderYieldOptimiser\"}],\"endorsed\":true,\"version\":\"0.4.3\",\"decimals\":6,\"type\":\"v2\",\"emergency_shutdown\":false,\"updated\":1687812580,\"migration\":{\"available\":false,\"address\":\"0x3B27F92C0e212C671EA351827EDF93DB27cc0c65\"}}]\"\"\") # noqa: E501\n nonlocal original_request\n return original_request(url, timeout)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n state_before = globaldb_get_general_cache_values(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n )\n\n with patch.object(requests, 'get', wraps=mock_yearn_api):\n query_yearn_vaults(db=database, ethereum_inquirer=ethereum_inquirer)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n state_after = globaldb_get_general_cache_values(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n )\n\n last_queried_ts = globaldb_get_general_cache_last_queried_ts(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n value=str(state_after[0]),\n )\n assert last_queried_ts is not None\n\n assert state_after != state_before\n # 140 is the number of vaults at the moment of writing this test\n assert len(state_before) == 0\n assert int(state_after[0]) == 2\n\n # check that a new vault was added\n token = GlobalDBHandler.get_evm_token(\n address=string_to_evm_address('0x341bb10D8f5947f3066502DC8125d9b8949FD3D6'),\n chain_id=ChainID.ETHEREUM,\n )\n\n assert token is not None\n assert token.name == 'yvCurve-STG-USDC 0.4.3'\n assert token.symbol == 'yvCurve-STG-USDC'\n assert token.protocol == YEARN_VAULTS_V2_PROTOCOL\n assert token.started == Timestamp(1654174125)\n\n # trigger the query again and check that the timestamp was updated\n future_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(seconds=WEEK_IN_SECONDS) # noqa: E501\n with freeze_time(future_timestamp), patch.object(requests, 'get', wraps=mock_yearn_api):\n query_yearn_vaults(db=database, 
ethereum_inquirer=ethereum_inquirer)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n new_queried_ts = globaldb_get_general_cache_last_queried_ts(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n value=str(state_after[0]),\n )\n assert new_queried_ts is not None\n assert new_queried_ts > last_queried_ts", "def test_retrieve_reteta_list(self):\n sample_reteta(user=self.user)\n sample_reteta(user=self.user)\n\n res = self.client.get(RETETA_URL)\n\n retete = Reteta.objects.all().order_by('-id')\n serializer = RetetaSerializer(retete, many=True) # return list\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def test_get_dealer_historical_inventory(self):\n pass", "def test_quote_guest_payment_method_management_v1_get_get(self):\n pass", "def test_get_specific_sale_record(self):\n \n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.get(\n '{}/saleorder'.format(self.base_url), json={\n 'sale_id': 1,\n 'name': \"Sample Bags\",\n 'price': 20,\n 'quantity': 1,\n 'totalamt': 20\n },\n headers=dict(Authorization=token),\n content_type='application/json')\n\n response = self.app_test_client.get(\n '{}/saleorder/1'.format(self.base_url),\n headers=dict(Authorization=token),\n content_type='application/json'\n )\n \n self.assertEqual(response.status_code, 200)", "def test_data_object_get_details(self):\n pass", "def test_get_item_details(self, mock_requests_get):\n details = resources.get_item_details(21787)\n\n item = details.item\n assert item.id == 21787\n assert item.name == \"Steadfast boots\"\n assert item.type == \"Miscellaneous\"\n assert item.current.price == 5900000\n assert item.today.price == -138200\n assert item.members is True", "def test_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.rate_details[\"user\"][\"slug\"] = \"-ss-dd-dd-ff\"\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'Article not found', str(\n response.data))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_can_get_risk_details(self):\n risk = self.create_risk()\n\n response = self.client.get(f'/api/v0/risk/{risk.id}/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual((response.data['risk_type'],\n response.data['risk_data']),\n (RiskSerializer(instance=risk)['risk_type'].value,\n RiskSerializer(instance=risk)['risk_data'].value))", "def test_response(self):\n\n from rubber import settings, resource\n settings.RUBBER_MOCK_HTTP_RESPONSE = \"\"\"{\"took\":2,\"timed_out\":false,\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\"hits\":{\"total\":2,\"max_score\":1.0,\"hits\":[{\"_index\":\"auth\",\"_type\":\"user\",\"_id\":\"6\",\"_score\":1.0, \"_source\" : {\"username\": \"guillaume\", \"first_name\": \"\", \"last_name\": \"\", \"is_active\": true, \"is_superuser\": false, \"is_staff\": false, \"last_login\": \"2012-08-02T08:30:11\", \"groups\": [], \"user_permissions\": [], \"password\": \"pbkdf2_sha256$10000$M1nRKJfbvdQf$ouX5u9FOUF/MKhhwuwYbiuoVidFITsBrEstGBB4mzZA=\", \"email\": \"somemail@test.com\", \"date_joined\": \"2012-08-02T08:30:11\"}},{\"_index\":\"auth\",\"_type\":\"user\",\"_id\":\"8\",\"_score\":1.0, \"_source\" : {\"username\": \"stephane\", \"first_name\": \"\", \"last_name\": \"\", \"is_active\": true, \"is_superuser\": false, \"is_staff\": false, \"last_login\": \"2012-08-02T09:14:38\", 
\"groups\": [], \"user_permissions\": [], \"password\": \"pbkdf2_sha256$10000$ORDHZAnNqTwF$UGmkUCyH0/uh1ruP93ZSTyog9Wi5g2qc+m/fxowigFs=\", \"email\": \"othermail@test.com\", \"date_joined\": \"2012-08-02T09:14:38\"}}]}}\"\"\"\n\n requestmock = RequestMock()\n resource.requests = requestmock\n\n response = self.Article.elasticsearch.search({})\n \n self.assertEquals(2, response.json['took'])\n\n from rubber.response import Response\n self.assertTrue(isinstance(response, Response))", "def get_details(self):", "def test_api_tables_endpoint(self):\n params = {'lender': '90000451965', 'metro': '49180'}\n url = reverse(tables)\n resp = self.client.get(url, params)\n result_dict = json.loads(resp.content)\n self.assertTrue(isinstance(result_dict, dict))\n keys = ['lender', 'peers', 'odds', 'msa', 'counties']\n lender_keys = ['hma_pct', 'lma_pct', 'mma_pct', 'lma', 'mma', 'hma', 'lar_total']\n for key in keys:\n self.assertTrue(key in result_dict['table_data'].keys())\n for key in lender_keys:\n self.assertTrue(key in result_dict['table_data']['lender'].keys())\n self.assertTrue(len(result_dict['table_data']['counties']) > 0)", "def test_get_rule_details(self):\n pass", "def test_get__gate_empty(self):\n testing_config.sign_out()\n with test_app.test_request_context(self.request_path + '/1'):\n actual_response = self.handler.do_get(\n feature_id=self.feature_id, gate_id=self.gate_1_id)\n self.assertEqual({'votes': []}, actual_response)", "def test_get(self):\n self.assertEqual(\n self.attempts[0],\n self.resource.get(self.attempts[0][_ATTEMPT.attempt_id]))", "def test_get_primary(self):\n response = self.client.open('/v1/primary/',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_get_usd_rates_succeeds(self, mock):\n\n mock.get(self.fixer_endpoint, status_code=200, text=fixture_get_usd_rates_succeeds)\n\n base = 'USD'\n r = self.simulate_get('/api/v1/rates', params={'symbol': base})\n self.assertEqual(r.status, falcon.HTTP_200)\n\n fixture = json.loads(fixture_get_usd_rates_succeeds)\n response_rates = r.json\n for response_rate in response_rates:\n sell_currency = response_rate['sell_currency']\n buy_currency = response_rate['buy_currency']\n rate = response_rate['rate']\n\n self.assertEqual(sell_currency, base)\n self.assertEqual(rate, fixture['rates'].get(buy_currency))", "def test_get_all_ordes(self):\n test_client = app.test_client()\n test_client.post('/api/v1/orders', data=json.dumps(dict(\n order_number=\"order_number\", order_description=\"order_description\",\n order_price=\"order_price\", size=\"size\")), content_type='application/json')\n response = test_client.get('/api/v1/orders')\n self.assertEqual(len(json.loads(response.data)), 1)", "def test_smoker_latest_get(self):\n pass", "def test_retrieve_1_by_all(self):\n swa = frontend.SupplyWinApi()\n query_dict = dict(\n dev=\"rrenaud\",\n targets=\"Council Room\",\n interaction=\"\",\n unconditional=\"true\",\n )\n\n card_stats = swa.retrieve_data(query_dict)\n\n self.assertEquals(len(card_stats), 1)\n\n self.assertEquals(card_stats[0]['card_name'], 'Council Room')\n self.assertEquals(len(card_stats[0]['condition']), 0)\n\n json = swa.readable_json_card_stats(card_stats)\n self.assertEquals(json[0:14], '[{\"card_name\":')", "def test_get_rate_plan_by_product_and_rate_plan(self):\n pass", "def test_award_list_summary(self):\n resp = self.client.get('/api/v1/awards/summary/')\n self.assertEqual(resp.status_code, 200)\n self.assertTrue(len(resp.data) > 2)\n\n 
self.assertEqual(self.client.get('/api/v1/awards/summary/fain/ABCD').status_code, 200)\n self.assertEqual(self.client.get('/api/v1/awards/summary/uri/ABCD').status_code, 200)\n self.assertEqual(self.client.get('/api/v1/awards/summary/piid/ABCD').status_code, 200)\n self.assertEqual(self.client.get('/api/v1/awards/summary/?funding_fpds=3100').status_code, 200)\n\n self.assertEqual(self.client.post('/api/v1/awards/summary/', content_type='application/json', data=json.dumps({\"page\": 1, \"limit\": 10})).status_code, 200)\n self.assertEqual(self.client.post('/api/v1/awards/summary/', content_type='application/json', data=json.dumps({\"page\": 1, \"limit\": 10, \"filters\": [{\"field\": \"funding_agency__fpds_code\", \"operation\": \"equals\", \"value\": \"0300\"}]})).status_code, 200)\n self.assertEqual(self.client.post('/api/v1/awards/summary/', content_type='application/json', data=json.dumps({\"page\": 1, \"limit\": 10, \"filters\": [{\"combine_method\": \"OR\", \"filters\": [{\"field\": \"funding_agency__fpds_code\", \"operation\": \"equals\", \"value\": \"0300\"}, {\"field\": \"awarding_agency__fpds_code\", \"operation\": \"equals\", \"value\": \"0300\"}]}]})).status_code, 200)\n self.assertEqual(self.client.post('/api/v1/awards/summary/', content_type='application/json', data=json.dumps({\"page\": 1, \"limit\": 10, \"filters\": [{\"field\": \"funding_agency__fpds_code\", \"operation\": \"ff\", \"value\": \"0300\"}]})).status_code, 400)", "def test_detail_format(self) -> None:\n r = self.perform_request('detail', True)\n self.assert_json_schema(r.json(), self.get_details_schema())", "def test_get(self):\n pass", "def test_get(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.get(rest_url)", "def test_get(self):\n expected_response = {\n 'id': 1111,\n 'first_name': 'Jhon',\n 'last_name': 'Doe',\n 'user_id': 1001,\n 'telegram_id': None\n }\n\n response = self.client.get(self.url)\n self.assertJSONEqual(json.dumps(expected_response), json.loads(response.content))\n self.assertEqual(response.status_code, 200)", "def test_api_response_data(self):", "def test_AlgorithmsIdHandler_GET_Empty(self):\n searchedId='xyz1'\n response = self.testapp.get('/algorithms/' + searchedId, expect_errors=True)\n self.assertEqual(404, response.status_int, msg='Non existent Algorithm was found in empty database')\n self.assertEqual('application/json', response.content_type)\n self.assertIn('Algorithm Not Found', response.normal_body.decode(encoding='UTF-8'))", "def test_client_verification_retrieve(self):\n pass", "def test_xblockcompletion_get_data_researcher(self):\n data = {\n 'format':'resumen',\n 'course': str(self.course.id)\n }\n response = self.client_data_researcher.get(reverse('xblockcompletion-data:data'), data)\n request = response.request\n r = json.loads(response._container[0].decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(r['status'], 'El reporte de preguntas esta siendo creado, en un momento estará disponible para descargar.')", "def test_retrieve(self):\n stats_data = self.stats_data\n facility = self.facility\n\n obj = FacilityPatientStatsHistory.objects.create(\n facility=facility, entry_date=datetime.date(2020, 4, 1), **stats_data\n )\n\n response = self.client.get(self.get_url(entry_id=obj.external_id), format=\"json\")\n self.assertDictEqual(\n response.json(), 
self.get_detail_representation(stats_data, facility=facility),\n )", "def test_get_pricing_with_correct_instrument():\n res = oanda.get_pricing(CONFIG, 'SPX500_USD')\n assert res[0] == 200\n assert isinstance(res[1], dict)\n # we want a price as result\n assert len(res[1]['prices']) > 0", "def test_company_EU_GR_vies_tax(self):\n self.assertEqual(self.policy.get_tax_rate(\"123456\", \"GR\"), (24, False))", "def test_resourcenotfound():\n\n URL_STR = \"http://52.24.157.193:5000/api/fibonacci/foo\"\n response = requests.get( URL_STR )\n data = response.json()\n assert response.status_code == 404", "def test_detail(self):\n self.assertEqual(self.product_1.id, 1)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {\n 'id': self.product_1.id,\n 'name': self.product_1.name,\n 'sku': self.product_1.sku,\n 'category': self.product_1.category.id,\n 'description': self.product_1.description,\n 'price': str(self.product_1.price),\n 'created': '2018-12-20T10:15:30Z',\n 'featured': self.product_1.featured\n }\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(response.json(), expected)", "def test_expense_summary_loads_properly(self):\n response = self.client.get('your_server_ip:8000/auth/login/expense/expense_summary')\n self.assertEqual(response.status_code, 404)", "def test_error_no_rate(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = []\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_retrive_recipe_detail(self):\n recipe = create_sample_recipe(user=self.sample_user)\n recipe.tag.add(create_sample_tag(user=self.sample_user))\n recipe.ingredient.add(create_sample_ingredient(user=self.sample_user))\n\n detail_URL = get_detail_URL(recipe.id)\n res = self.client.get(detail_URL)\n\n serializer = RecipeDetailSerializer(recipe)\n\n self.assertEqual(res.data, serializer.data)", "def test_retrieve_recipe(self):\n sample_recipe(user=self.user)\n sample_recipe(user=self.user)\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.all().order_by('id')\n serializer = RecipeSerializer(recipes,many=True)\n\n print(json.dumps(serializer.data, indent=1))\n print('ok')\n print(json.dumps(res.data, indent=1))\n self.assertTrue(res.status_code,status.HTTP_200_OK)\n self.assertEqual(res.data,serializer.data)", "def test_get_star__no_existing(self):\n email = 'user1@example.com'\n feature_id = self.feature_1.key.integer_id()\n actual = notifier.FeatureStar.get_star(email, feature_id)\n self.assertEqual(None, actual)", "def test_cannot_get_other_attendant_sales(self):\n response = self.client.get(\n '/self.base_url/sales/1',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You can only view your sales\")\n self.assertEqual(response.status_code,401)", "def test_getTLEFromCatalogEntryReturns400IfNoTLEFoundForTime(self):\n response = self.client.get('/api/v1/catalogentry/25544/tle/?time=20000825200000')\n self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)", "def 
test_rate_article_without_token(self):\n response = self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.assertIn(\n 'Authentication credentials were not provided.', str(\n response.data))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_get_review_detail_fail(self):\n client = Client()\n response = client.get('/api/review/1/')\n self.assertEqual(response.status_code, 401)\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.get('/api/review/7/')\n self.assertEqual(response.status_code, 404)", "def test_api_can_get_all_pressures(self):\n res = self.client().post('/pressures/', data=self.pressure)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/pressures/')\n self.assertEqual(res.status_code, 200)\n self.assertIn('120', str(res.data))", "def test_client_risk_assessment_retrieve(self):\n pass", "def test_get_purchase_with_empty_result(self, m):\n url = \"https://www.cellartracker.com/xlquery.asp?User=test-username&Password=test-password&Table=Purchase&Format=tab&Location=1\"\n file = open(\"./tests/fixtures/purchase_empty.tsv\", \"r\")\n m.register_uri(\"GET\", url, status_code=200, text=file.read())\n file.close\n\n cellartracker = CellarTracker(username=\"test-username\", password=\"test-password\")\n data = cellartracker.get_purchase()\n self.assertEqual([], data)", "def test_get_product_rate_plan_by_id(self):\n pass", "def test_nascardrivers_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/nascardrivers/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get():\n\n # \\todo List of available data, fetched and processed\n\n return jsonify({'valid_resources': ['balance', 'balance_usd', 'trade_history', 'balance_norm_price_history', 'open_orders']})", "def test_get_offers(self):\n pass", "def test_get_small_and_light_fee_preview(self):\n pass", "def test_fill_invalid_call(self):\n request = self.client.post('/stocks/fill/', follow=True, secure=True)\n self.assertEqual(request.status_code, 405)", "def test_predict_house_price():\n with app.test_client()as c:\n response = c.get('/REST/api/v1.0/predict')\n assert response.status_code == 201", "def test_beneficiaries_retrieve_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve')\n response = self.client.get(url)\n self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_get_restaurant_by_id_not_number(self):\n resp = self.test_client.get(self.API_BASE + '/hello', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 400)", "def get_details(self):\n raise Exception(\"bad details\")", "def test_smoker_get(self):\n pass", "def test_GET4(self):\n r = requests.get(self.address + \"/carcar/23\")\n self.assertEqual(r.status_code, 400)", "def testGet(self):\n response = self.runGet(self.root, sequencer=self.hiseq2000.vendor_id)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_retrieve_specific_ingredient(self):\n ingredient = create_sample_ingredient(user=self.user, name='Honey')\n\n url = detail_url(ingredient.id)\n response = self.client.get(url)\n\n 
serializer = IngredientSerializer(ingredient)\n\n self.assertEqual(response.data, serializer.data)", "def test_accounting_gateways_resource_methods(self, mock_url):\n account_id = 1234\n resource_id = 2345\n\n list_response = {\"gateways\": [], \"page\": 1, \"pages\": 0, \"per_page\": 15, \"total\": 0}\n single_response = {}\n with patch.object(AccountingResource, \"_request\", return_value=list_response) as mock_request:\n self.freshBooksClient.gateways.list(account_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with patch.object(AccountingResource, \"_request\", return_value=single_response) as mock_request:\n self.freshBooksClient.gateways.delete(account_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.DELETE)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.gateways.get(account_id, resource_id)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.gateways.create(account_id, {})\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.gateways.update(account_id, resource_id, {})", "def test_api_predictors_get(self):\n pass", "def test_companies_company_id_data_tax_rates_tax_rate_id_get(self):\n pass", "def test_get__gate_some(self):\n testing_config.sign_out()\n self.vote_1_1.put() # Found.\n self.vote_2_1.put() # On a different gate.\n\n with test_app.test_request_context(self.request_path + '/1'):\n actual_response = self.handler.do_get(\n feature_id=self.feature_id, gate_id=self.gate_1_id)\n\n self.assertEqual({'votes': [self.vote_expected1]}, actual_response)", "def test_retrieve_1_by_1(self):\n swa = frontend.SupplyWinApi()\n query_dict = dict(\n dev=\"rrenaud\",\n targets=\"Council Room\",\n interaction=\"Farming Village\",\n unconditional=\"true\",\n )\n\n card_stats = swa.retrieve_data(query_dict)\n\n self.assertEquals(len(card_stats), 2)\n\n self.assertEquals(card_stats[0]['card_name'], 'Council Room')\n self.assertEquals(card_stats[0]['condition'][0], 'Farming Village')\n\n self.assertEquals(card_stats[1]['card_name'], 'Council Room')\n self.assertEquals(len(card_stats[1]['condition']), 0)\n\n json = swa.readable_json_card_stats(card_stats)\n self.assertEquals(json[0:14], '[{\"card_name\":')", "def test_correctitemid_correctresponsebody(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/54590\"\r\n r = requests.get(url)\r\n\r\n expected = {\r\n 'itemId': 54590,\r\n 'name': 'Sharpened Twilight Scale',\r\n 'gearScore': 310\r\n }\r\n\r\n self.assertEqual(r.json(), expected)" ]
[ "0.6968484", "0.62199587", "0.61830664", "0.6144193", "0.61011153", "0.59623325", "0.58962244", "0.58485985", "0.5811617", "0.5798121", "0.5735763", "0.5712043", "0.56655514", "0.5655179", "0.56484246", "0.5585292", "0.5584498", "0.5572597", "0.5553329", "0.55408216", "0.55361027", "0.55315655", "0.5476421", "0.5474295", "0.5471295", "0.54707515", "0.5468141", "0.5459678", "0.54576546", "0.54391783", "0.54226404", "0.5412031", "0.54032034", "0.54018253", "0.54000354", "0.53951555", "0.5389902", "0.5382505", "0.5379738", "0.53773504", "0.53760576", "0.53746444", "0.5372589", "0.53709817", "0.536421", "0.53511375", "0.5347422", "0.53424144", "0.5341832", "0.5338541", "0.5327804", "0.53226864", "0.5321768", "0.53205353", "0.53192705", "0.53153574", "0.5314581", "0.53044885", "0.52961624", "0.52936786", "0.5293131", "0.52921665", "0.5289252", "0.52869695", "0.5285719", "0.528475", "0.52761143", "0.52719736", "0.5268172", "0.52674353", "0.52633494", "0.52628833", "0.5262141", "0.5258429", "0.52565867", "0.525223", "0.5245227", "0.5243826", "0.5240081", "0.5239039", "0.5236272", "0.52283436", "0.52279943", "0.52241194", "0.5223007", "0.52074313", "0.5204491", "0.5202483", "0.51915395", "0.5190212", "0.5186027", "0.5181207", "0.51691896", "0.51606995", "0.5158725", "0.51559734", "0.51538295", "0.51519024", "0.5149094", "0.51490116" ]
0.7106652
0
Tests the API endpoint to get hashrate resale details
def test_mining_hashrate_resale_details(): client = Client(key, secret) response = client.mining_hashrate_resale_details(123, "user_name") response.should.equal(mock_item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_retire_rate_plan(self):\n pass", "def test_retrieve_list_resgate_to_user_authenticated(self):\n sample_resgate(user=self.user, value=500)\n sample_resgate(user=self.user, value=200)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.all().order_by('quantity')\n serializer = ResgateSerializer(resgates, many=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, serializer.data)", "async def test_get_rates_get(client):\n params = [('exchangeType', 'exchange_type_example')]\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='GET',\n path='/public/exchange/1/getRates',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_get_rate_article(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_2)\n self.rate_details[\"user\"]['rate'] = 4\n self.client.post(\n self.rate_url,\n self.rate_details,\n format='json')\n response = self.client.get(\n self.view_rates_url + str(1) + \"/\",\n format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_rating(self):\n url = reverse('rate-game')\n data = {'igdb': self.game.igdb}\n response = self.client.get(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_mining_hashrate_resale_details_with_missing_field(params):\n client = Client(key, secret)\n client.mining_hashrate_resale_details.when.called_with(**params).should.throw(\n ParameterRequiredError\n )", "def test_mocked_get_api(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/154/\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response.content)\n response2 = c.get(\"/apimock/mocked/api/account/187/\")\n self.assertEqual(response2.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response2.content)", "def test_validation_get_valid_resampling(self):\n self.assertIsInstance(api.validation.fetch_resampling(), dict)", "def test_get_details(self):\n restaurant_id = 23917\n with self.app.app_context():\n details = ordrin.get_details(restaurant_id)\n\n self.assertEquals(details['name'], 'Test Merchant 20130315',\n 'Check restaurant name on test details.')\n self.assertEquals(details['id'], restaurant_id,\n 'Check restaurant id on test details.')\n self.assertTrue(details['delivers'], 'Check delivery flag on test entry.')\n self.assertTrue(details['allows_asap'],\n 'Check asap flag on test details.')\n self.assertAlmostEqual(details['location'][0], 42.825685,\n 'Check latitude on test details.')\n self.assertAlmostEqual(details['location'][1], -73.879458,\n 'Check longitude on test details.')\n self.assertEquals(details['partner'], 'delivery.com',\n 'Check delivery partner on test details.')\n self.assertEquals(details['address'], '123 FAKE ST',\n 'Check address on test details.')\n self.assertTrue(False)", "def test_get_details7(self):\n pass", "def test_get_primary(self):\n response = self.client.open('/v1/primary/',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def 
test_predict_house_price():\n with app.test_client()as c:\n response = c.get('/REST/api/v1.0/predict')\n assert response.status_code == 201", "def test_household_get(self):\n url = '/household/'+ self.test_id + '/'\n response = self.tester.get(url,\n content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def test_limited_to_user_who_made_resgate(self):\n user2 = get_user_model().objects.create_user(\n 'test2@email.com',\n 'test12345'\n )\n sample_resgate(user=user2)\n sample_resgate(user=self.user)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.filter(user=self.user)\n serializer = ResgateSerializer(resgates, many=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data, serializer.data)", "def test_get(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.get(rest_url)", "def test_retrieve_reteta_list(self):\n sample_reteta(user=self.user)\n sample_reteta(user=self.user)\n\n res = self.client.get(RETETA_URL)\n\n retete = Reteta.objects.all().order_by('-id')\n serializer = RetetaSerializer(retete, many=True) # return list\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def test_get_pay_in_details(self):\n pass", "def test_gelir_api(self):\n response = self.client.get(reverse('gelir-json', args=[self.sample_type]))\n self.assertContains(response, self.proband.gel_id)\n self.assertEquals(response.status_code, 200)", "def test_view_reteta_detail(self):\n reteta = sample_reteta(user=self.user)\n reteta.tags.add(sample_tag(user=self.user))\n reteta.ingredients.add(sample_ingredient(user=self.user))\n\n url = detail_url(reteta.id)\n res = self.client.get(url)\n serializer = RetetaDetailSerializer(reteta)\n self.assertEqual(res.data, serializer.data)", "def test_lti20_get_with_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n SCORE = 0.55 # pylint: disable=invalid-name\r\n COMMENT = u\"ಠ益ಠ\" # pylint: disable=invalid-name\r\n self.xmodule.module_score = SCORE\r\n self.xmodule.score_comment = COMMENT\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\",\r\n \"resultScore\": SCORE,\r\n \"comment\": COMMENT})", "def test_get_usd_rates_succeeds(self, mock):\n\n mock.get(self.fixer_endpoint, status_code=200, text=fixture_get_usd_rates_succeeds)\n\n base = 'USD'\n r = self.simulate_get('/api/v1/rates', params={'symbol': base})\n self.assertEqual(r.status, falcon.HTTP_200)\n\n fixture = json.loads(fixture_get_usd_rates_succeeds)\n response_rates = r.json\n for response_rate in response_rates:\n sell_currency = response_rate['sell_currency']\n buy_currency = response_rate['buy_currency']\n rate = response_rate['rate']\n\n self.assertEqual(sell_currency, base)\n self.assertEqual(rate, fixture['rates'].get(buy_currency))", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = 
self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def test_get_all_rate_plans(self):\n pass", "def test_get_dealer_ratings(self):\n pass", "def test_api_can_get_all_pressures(self):\n res = self.client().post('/pressures/', data=self.pressure)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/pressures/')\n self.assertEqual(res.status_code, 200)\n self.assertIn('120', str(res.data))", "def test_get_risk_profile_using_get(self):\n pass", "def get():\n\n # \\todo List of available data, fetched and processed\n\n return jsonify({'valid_resources': ['balance', 'balance_usd', 'trade_history', 'balance_norm_price_history', 'open_orders']})", "def test_yearn_api(database, ethereum_inquirer):\n # mock coingecko response\n original_request = requests.get\n\n def mock_yearn_api(url, timeout):\n \"\"\"Return only two yearn vaults for the API response\"\"\"\n if YEARN_OLD_API in url:\n return MockResponse(HTTPStatus.OK, \"\"\"[{\"inception\":14891068,\"address\":\"0x341bb10D8f5947f3066502DC8125d9b8949FD3D6\",\"symbol\":\"yvCurve-STG-USDC\",\"name\":\"yvCurve-STG-USDC 0.4.3\",\"display_name\":\"STGUSDC-f\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0x341bb10D8f5947f3066502DC8125d9b8949FD3D6/logo-128.png\",\"token\":{\"name\":\"Curve.fi Factory Crypto Pool: STG/USDC\",\"symbol\":\"STGUSDC-f\",\"address\":\"0xdf55670e27bE5cDE7228dD0A6849181891c9ebA1\",\"decimals\":18,\"display_name\":\"STGUSDC-f\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0xdf55670e27bE5cDE7228dD0A6849181891c9ebA1/logo-128.png\"},\"tvl\":{\"total_assets\":1066762132988328431382564,\"price\":1.613069263536325,\"tvl\":1720761.2082279222},\"apy\":{\"type\":\"convex\",\"gross_apr\":0.14584764353034685,\"net_apy\":0.09226416095055612,\"fees\":{\"performance\":0.2,\"withdrawal\":null,\"management\":0.02,\"keep_crv\":null,\"cvx_keep_crv\":0.1},\"points\":null,\"blocks\":null,\"composite\":null,\"error_reason\":null,\"staking_rewards_apr\":0},\"strategies\":[{\"address\":\"0x916011bD2d333fBA14dBB8bf0BdF01e3384FD2e6\",\"name\":\"StrategyConvexSTGUSDC\"}],\"endorsed\":true,\"version\":\"0.4.3\",\"decimals\":18,\"type\":\"v2\",\"emergency_shutdown\":false,\"updated\":1687812577,\"migration\":{\"available\":false,\"address\":\"0x341bb10D8f5947f3066502DC8125d9b8949FD3D6\"}},{\"inception\":14980240,\"address\":\"0x3B27F92C0e212C671EA351827EDF93DB27cc0c65\",\"symbol\":\"yvUSDT\",\"name\":\"yvUSDT 0.4.3\",\"display_name\":\"USDT\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0x3B27F92C0e212C671EA351827EDF93DB27cc0c65/logo-128.png\",\"token\":{\"name\":\"Tether 
USD\",\"symbol\":\"USDT\",\"address\":\"0xdAC17F958D2ee523a2206206994597C13D831ec7\",\"decimals\":6,\"display_name\":\"USDT\",\"icon\":\"https://rawcdn.githack.com/yearn/yearn-assets/4db1bb6b68ad0c7a75a1b3bf01025d2c22cfbaa7/icons/multichain-tokens/1/0xdAC17F958D2ee523a2206206994597C13D831ec7/logo-128.png\"},\"tvl\":{\"total_assets\":14938928651062,\"price\":1.0000823,\"tvl\":14940158.124889985},\"apy\":{\"type\":\"v2:averaged\",\"gross_apr\":0.023362870862237983,\"net_apy\":0.018862632100866916,\"fees\":{\"performance\":0.2,\"withdrawal\":null,\"management\":0.0,\"keep_crv\":null,\"cvx_keep_crv\":null},\"points\":{\"week_ago\":0.013129557974331796,\"month_ago\":0.018862632100866916,\"inception\":0.022614793789739185},\"blocks\":{\"now\":17565983,\"week_ago\":17516180,\"month_ago\":17345663,\"inception\":15243268},\"composite\":null,\"error_reason\":null,\"staking_rewards_apr\":0},\"strategies\":[{\"address\":\"0x016919386387898E4Fa87c7c4D3324F75f178F12\",\"name\":\"0x01691938\"},{\"address\":\"0x087794F304aEB337388a40e7c382A0fEa78c47fC\",\"name\":\"Strategy_ProviderOfUSDTToNoHedgeUniV3StablesJoint(USDC-USDT)\"},{\"address\":\"0xBc04eFD0D18685BA97cFAdE4e2D3171701B4099c\",\"name\":\"StrategyLenderYieldOptimiser\"},{\"address\":\"0xE7A8Cbc43a0506d3A328393C1C30548835256d7D\",\"name\":\"Stargate-v2-USDT\"},{\"address\":\"0xde6F5b2452F94337a428c86b5D2F143383b4D573\",\"name\":\"Strategy_ProviderOfUSDTToNoHedgeBalancerTripod(bb-a-USD)\"},{\"address\":\"0x8829f62FCe1DFBfA3EB60eBE95133D5F43b9BD04\",\"name\":\"EmptyStrat\"},{\"address\":\"0xd8F414beB0aEb5784c5e5eBe32ca9fC182682Ff8\",\"name\":\"StrategyLenderYieldOptimiser\"}],\"endorsed\":true,\"version\":\"0.4.3\",\"decimals\":6,\"type\":\"v2\",\"emergency_shutdown\":false,\"updated\":1687812580,\"migration\":{\"available\":false,\"address\":\"0x3B27F92C0e212C671EA351827EDF93DB27cc0c65\"}}]\"\"\") # noqa: E501\n nonlocal original_request\n return original_request(url, timeout)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n state_before = globaldb_get_general_cache_values(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n )\n\n with patch.object(requests, 'get', wraps=mock_yearn_api):\n query_yearn_vaults(db=database, ethereum_inquirer=ethereum_inquirer)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n state_after = globaldb_get_general_cache_values(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n )\n\n last_queried_ts = globaldb_get_general_cache_last_queried_ts(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n value=str(state_after[0]),\n )\n assert last_queried_ts is not None\n\n assert state_after != state_before\n # 140 is the number of vaults at the moment of writing this test\n assert len(state_before) == 0\n assert int(state_after[0]) == 2\n\n # check that a new vault was added\n token = GlobalDBHandler.get_evm_token(\n address=string_to_evm_address('0x341bb10D8f5947f3066502DC8125d9b8949FD3D6'),\n chain_id=ChainID.ETHEREUM,\n )\n\n assert token is not None\n assert token.name == 'yvCurve-STG-USDC 0.4.3'\n assert token.symbol == 'yvCurve-STG-USDC'\n assert token.protocol == YEARN_VAULTS_V2_PROTOCOL\n assert token.started == Timestamp(1654174125)\n\n # trigger the query again and check that the timestamp was updated\n future_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(seconds=WEEK_IN_SECONDS) # noqa: E501\n with freeze_time(future_timestamp), patch.object(requests, 'get', wraps=mock_yearn_api):\n query_yearn_vaults(db=database, 
ethereum_inquirer=ethereum_inquirer)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n new_queried_ts = globaldb_get_general_cache_last_queried_ts(\n cursor=cursor,\n key_parts=[GeneralCacheType.YEARN_VAULTS],\n value=str(state_after[0]),\n )\n assert new_queried_ts is not None\n assert new_queried_ts > last_queried_ts", "def test_update_rate_plan(self):\n pass", "def test_smoker_latest_get(self):\n pass", "def test_can_get_risk_details(self):\n risk = self.create_risk()\n\n response = self.client.get(f'/api/v0/risk/{risk.id}/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual((response.data['risk_type'],\n response.data['risk_data']),\n (RiskSerializer(instance=risk)['risk_type'].value,\n RiskSerializer(instance=risk)['risk_data'].value))", "def _get(self, r_uri):\n\n r_uri = \"https://\" + self.gateway_address + \":\" + self.gateway_port + r_uri\n r = requests.get(r_uri,\n auth=(self.sio_user, self.sio_token),\n verify=False)\n r = self._check_response(r, r_uri)\n response = r.json()\n\n return r, response", "def test_get(self):\n self.assertEqual(\n self.attempts[0],\n self.resource.get(self.attempts[0][_ATTEMPT.attempt_id]))", "def test_lti20_get_no_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\"})", "def test_get(self):\n #Validate the response\n resp = self.client.get('/api/v1/purchase-order/1/')\n self.assertEqual(resp.status_code, 200)\n \n #Validate the returned data\n obj = resp.data\n self.assertEqual(obj['id'], 1)\n self.assertEqual(obj['terms'], '0/net')\n self.assertEqual(obj['revision'], 0)\n \n #Test items\n self.assertIn('items', obj)\n self.assertEqual(len(obj['items']), 1)\n item1 = obj['items'][0]\n #self.assertIn('purchasing_units', item1)\n #self.assertEqual(item1['purchasing_units'], 'm')", "def refresh_details(self) -> 'outputs.RefreshDetailsResponse':\n return pulumi.get(self, \"refresh_details\")", "def test_retrieve_recipe(self):\n sample_recipe(user=self.user)\n sample_recipe(user=self.user)\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.all().order_by('id')\n serializer = RecipeSerializer(recipes,many=True)\n\n print(json.dumps(serializer.data, indent=1))\n print('ok')\n print(json.dumps(res.data, indent=1))\n self.assertTrue(res.status_code,status.HTTP_200_OK)\n self.assertEqual(res.data,serializer.data)", "def test_get_rate_plan_by_product(self):\n pass", "def test_get_transaction_details_request(self):\n self.trans_details.get_transaction_details(\n trans_id = 123456,\n )", "def test_nascardrivers_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/nascardrivers/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_quote_guest_payment_method_management_v1_get_get(self):\n pass", "def test_get_metadata_for_rate_plan(self):\n pass", "def test_get_restaurant_review_list_fail(self):\n client = Client()\n res_id = Restaurant.objects.get(name='TEST_REST').id\n response = client.get('/api/restaurant/'+str(res_id)+'/')\n 
self.assertEqual(response.status_code, 401)", "def test_smoker_get(self):\n pass", "def test_detail(self):\n self.assertEqual(self.product_1.id, 1)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {\n 'id': self.product_1.id,\n 'name': self.product_1.name,\n 'sku': self.product_1.sku,\n 'category': self.product_1.category.id,\n 'description': self.product_1.description,\n 'price': str(self.product_1.price),\n 'created': '2018-12-20T10:15:30Z',\n 'featured': self.product_1.featured\n }\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(response.json(), expected)", "def test_api_response(self):\n # url = 'http://127.0.0.1:8000/api/aircraft/'\n url = reverse('airlines:aircraft-list')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_retrive_recipe_detail(self):\n recipe = create_sample_recipe(user=self.sample_user)\n recipe.tag.add(create_sample_tag(user=self.sample_user))\n recipe.ingredient.add(create_sample_ingredient(user=self.sample_user))\n\n detail_URL = get_detail_URL(recipe.id)\n res = self.client.get(detail_URL)\n\n serializer = RecipeDetailSerializer(recipe)\n\n self.assertEqual(res.data, serializer.data)", "def test_detail(request, pk, format=None):\n try:\n snippet = Base_price.objects.get(pk=pk)\n except Base_price.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = Base_priceSerializer(snippet)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = Base_priceSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def test_beneficiaries_retrieve_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve', kwargs={'pk': 1})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n expected_response = {\n 'id': 1111,\n 'first_name': 'Jhon',\n 'last_name': 'Doe',\n 'user_id': 1001,\n 'telegram_id': None\n }\n\n response = self.client.get(self.url)\n self.assertJSONEqual(json.dumps(expected_response), json.loads(response.content))\n self.assertEqual(response.status_code, 200)", "def test_response(self):\n\n from rubber import settings, resource\n settings.RUBBER_MOCK_HTTP_RESPONSE = \"\"\"{\"took\":2,\"timed_out\":false,\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\"hits\":{\"total\":2,\"max_score\":1.0,\"hits\":[{\"_index\":\"auth\",\"_type\":\"user\",\"_id\":\"6\",\"_score\":1.0, \"_source\" : {\"username\": \"guillaume\", \"first_name\": \"\", \"last_name\": \"\", \"is_active\": true, \"is_superuser\": false, \"is_staff\": false, \"last_login\": \"2012-08-02T08:30:11\", \"groups\": [], \"user_permissions\": [], \"password\": \"pbkdf2_sha256$10000$M1nRKJfbvdQf$ouX5u9FOUF/MKhhwuwYbiuoVidFITsBrEstGBB4mzZA=\", \"email\": \"somemail@test.com\", \"date_joined\": \"2012-08-02T08:30:11\"}},{\"_index\":\"auth\",\"_type\":\"user\",\"_id\":\"8\",\"_score\":1.0, \"_source\" : 
{\"username\": \"stephane\", \"first_name\": \"\", \"last_name\": \"\", \"is_active\": true, \"is_superuser\": false, \"is_staff\": false, \"last_login\": \"2012-08-02T09:14:38\", \"groups\": [], \"user_permissions\": [], \"password\": \"pbkdf2_sha256$10000$ORDHZAnNqTwF$UGmkUCyH0/uh1ruP93ZSTyog9Wi5g2qc+m/fxowigFs=\", \"email\": \"othermail@test.com\", \"date_joined\": \"2012-08-02T09:14:38\"}}]}}\"\"\"\n\n requestmock = RequestMock()\n resource.requests = requestmock\n\n response = self.Article.elasticsearch.search({})\n \n self.assertEquals(2, response.json['took'])\n\n from rubber.response import Response\n self.assertTrue(isinstance(response, Response))", "def test_get_risk_profile_all_using_get(self):\n pass", "def test_accounting_gateways_resource_methods(self, mock_url):\n account_id = 1234\n resource_id = 2345\n\n list_response = {\"gateways\": [], \"page\": 1, \"pages\": 0, \"per_page\": 15, \"total\": 0}\n single_response = {}\n with patch.object(AccountingResource, \"_request\", return_value=list_response) as mock_request:\n self.freshBooksClient.gateways.list(account_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with patch.object(AccountingResource, \"_request\", return_value=single_response) as mock_request:\n self.freshBooksClient.gateways.delete(account_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.DELETE)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.gateways.get(account_id, resource_id)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.gateways.create(account_id, {})\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.gateways.update(account_id, resource_id, {})", "def test_resourcenotfound():\n\n URL_STR = \"http://52.24.157.193:5000/api/fibonacci/foo\"\n response = requests.get( URL_STR )\n data = response.json()\n assert response.status_code == 404", "def test_get(self):\n pass", "def test_retrieve(self):\n stats_data = self.stats_data\n facility = self.facility\n\n obj = FacilityPatientStatsHistory.objects.create(\n facility=facility, entry_date=datetime.date(2020, 4, 1), **stats_data\n )\n\n response = self.client.get(self.get_url(entry_id=obj.external_id), format=\"json\")\n self.assertDictEqual(\n response.json(), self.get_detail_representation(stats_data, facility=facility),\n )", "def test_client_verification_retrieve(self):\n pass", "def testGet(self):\n response = self.runGet(self.root, sequencer=self.hiseq2000.sodar_uuid)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_get__gate_some(self):\n testing_config.sign_out()\n self.vote_1_1.put() # Found.\n self.vote_2_1.put() # On a different gate.\n\n with test_app.test_request_context(self.request_path + '/1'):\n actual_response = self.handler.do_get(\n feature_id=self.feature_id, gate_id=self.gate_1_id)\n\n self.assertEqual({'votes': [self.vote_expected1]}, actual_response)", "def test_get_readiness(self):\n response = self.client.open('/api/v1//readiness',\n method='GET',\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def testGet(self):\n response = self.runGet(self.root, sequencer=self.hiseq2000.vendor_id)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def 
test_0100_activationkey_resend_get(self):\n response = self.fetch(\n '/activation_resend', method=\"GET\", follow_redirects=False,\n )\n self.assertEqual(response.code, 200)", "def test_get_all_ordes(self):\n test_client = app.test_client()\n test_client.post('/api/v1/orders', data=json.dumps(dict(\n order_number=\"order_number\", order_description=\"order_description\",\n order_price=\"order_price\", size=\"size\")), content_type='application/json')\n response = test_client.get('/api/v1/orders')\n self.assertEqual(len(json.loads(response.data)), 1)", "def test_page_fetch(self):\n httpretty.register_uri(\n httpretty.GET,\n self.endpoint_url(\"/settlement\"),\n content_type='text/json',\n body='{\"status\": true, \"message\": \"Settlements retrieved\"}',\n status=201,\n )\n\n response = Settlement.fetch(\n start_date=\"2016-09-12T00:00:00.000Z\",\n end_date=\"2016-09-12T00:00:00.000Z\",\n subaccount=\"subaccount\"\n )\n self.assertEqual(response['status'], True)", "def test_get_item_details(self, mock_requests_get):\n details = resources.get_item_details(21787)\n\n item = details.item\n assert item.id == 21787\n assert item.name == \"Steadfast boots\"\n assert item.type == \"Miscellaneous\"\n assert item.current.price == 5900000\n assert item.today.price == -138200\n assert item.members is True", "def test_basic_fetch(client):\n\n res = client.get('/api/reminders')\n assert res.status_code == 200\n assert res.content_type == 'application/json'", "def get(self, request):\n\n # get query params data\n self.from_currency = request.query_params.get('from_currency', None)\n self.to_currency = request.query_params.get('to_currency', None)\n self.date = request.query_params.get('date', None)\n\n # perform simple validation on query params\n is_not_valid = self.validate()\n if is_not_valid:\n return Response({\"data\":None, \"status\":is_not_valid}, status=status.HTTP_400_BAD_REQUEST)\n\n # try to fetch data from database if exist, else get it from external API and save it in database\n try:\n rate = Rate.objects.get(from_currency=self.from_currency, to_currency=self.to_currency, date=self.date).rate\n\n except:\n response = get(f\"https://www.frankfurter.app/{self.date}?from={self.from_currency}&to={self.to_currency}\")\n\n if response.status_code != 200:\n return Response({\"data\":None, \"status\":response.reason}, status=status.HTTP_404_NOT_FOUND)\n\n rate = response.json()[\"rates\"][self.to_currency]\n self.date = response.json()['date']\n\n # Create a record with the two currencies rate\n Rate.objects.create(from_currency=self.from_currency, to_currency=self.to_currency, date=self.date, rate=rate)\n\n return Response({\"data\":{\n \"date\":self.date, \n \"rate\":f\"1 {self.from_currency} = {rate} {self.to_currency}\"\n }, \n \"status\":\"Successful\"})", "def test_api_get_candidate_by_enrolement_no(self):\r\n self.register_user()\r\n result = self.login_user()\r\n access_token = json.loads(result.data.decode())['access_token']\r\n\r\n rv = self.client().post('/candidate',headers=dict(Authorization=access_token),data=self.candidate) \r\n self.assertEqual(rv.status_code, 201)\r\n\r\n results = json.loads(rv.data.decode())\r\n result = self.client().get('/candidate',headers=dict(Authorization=access_token),data={'enrolement_no':results['enrolement_no']})\r\n self.assertEqual(result.status_code, 200)", "def test_get_healthz(self):\n response = self.client.open(\n '/v1/healthz',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def 
test_get_offers(self):\n pass", "def test_get_restaurant_review_list_success(self):\n client = Client()\n res_id = Restaurant.objects.get(name='TEST_REST').id\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.get('/api/restaurant/'+str(res_id)+'/')\n self.assertEqual(response.status_code, 200)\n self.assertIn('TEST_CONTENT3', response.content.decode())\n response = client.get('/api/restaurant/'+str(res_id+1)+'/')\n self.assertEqual(response.json(), [])", "def test_get_specific_sale_record(self):\n \n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.get(\n '{}/saleorder'.format(self.base_url), json={\n 'sale_id': 1,\n 'name': \"Sample Bags\",\n 'price': 20,\n 'quantity': 1,\n 'totalamt': 20\n },\n headers=dict(Authorization=token),\n content_type='application/json')\n\n response = self.app_test_client.get(\n '{}/saleorder/1'.format(self.base_url),\n headers=dict(Authorization=token),\n content_type='application/json'\n )\n \n self.assertEqual(response.status_code, 200)", "def test_accounting_system_resource_methods(self, mock_url):\n account_id = 1234\n resource_id = 2345\n\n single_response = {\"system\": {}}\n\n with patch.object(AccountingResource, \"_request\", return_value=single_response) as mock_request:\n self.freshBooksClient.systems.get(account_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.systems.list(account_id)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.systems.create(account_id, {})\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.systems.update(account_id, resource_id, {})\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.systems.delete(account_id, resource_id)", "def test_get_restaurant(self):\n url = \"/get_restaurants\"\n response = app.test_client().get(url)\n response_json = response.json\n with open('expected_responses/restaurants.json', 'r') as f:\n datastore = json.load(f)\n\n assert datastore == response_json, logging.error(\n \"GET Restaurants Failed!\")\n logging.info(\"GET Restaurants API Tested\")", "def test_company_EU_GR_vies_tax(self):\n self.assertEqual(self.policy.get_tax_rate(\"123456\", \"GR\"), (24, False))", "def get_proxy_recommend_tests():\n sessionObj = dpma.LoginRestServer()\n\n # log in Rest Server \n url = dpma.getURLpath(basePath, 'auth/login')\n response = sessionObj.login(url, headers, vCenterPayload)\n assert_equal(response['statusCode'], 200)\n\n # list recommend\n url = dpma.getURLpath(basePath, 'recommend')\n headers['X-CustomTicket'] = response['sessionTicket']\n response = dpma.sendGetRequest(url, headers)\n assert_equal(response.status_code, 206)\n \n # convert Json object to Python object\n resPython = json.loads(response.content)\n\n recList = []\n\n for rec in resPython:\n recList.append(rec['recommendationId'])\n\n # fileObj = open('/home/kevin/recommendid1', 'w')\n # # fileObj.write(response.content)\n # for rec in resPython:\n # fileObj.write(rec['recommendationId'])\n # fileObj.write(\"\\n\")\n\n # fileObj.close()\n \n \n # get recommend\n i = 1\n for recItem in recList:\n recommendId = 'recommend/' + recItem\n url = dpma.getURLpath(basePath, recommendId)\n response = dpma.sendGetRequest(url, headers)\n assert_equal(response.status_code, 200)\n filepath = '/home/kevin/recommendproxy/getrecommendfile' + str(i) + 
'.json'\n fileObj = open(filepath, 'w')\n fileObj.write(response.content)\n fileObj.close()\n i = i + 1\n \n url = dpma.getURLpath(basePath, 'auth/logout')\n if not 'X-CustomTicket' in headers:\n headers['X-CustomTicket'] = response['sessionTicket']\n response = dpma.logout(url, headers)\n assert_equal(response.status_code, 200)\n\n \n\n\n # return response.headers['Location']", "def test_get_dealer_historical_inventory(self):\n pass", "def TestConnection(self):\n json_response = self._QueryHashes([self._EICAR_SHA256])\n return json_response is not None", "def TestConnection(self):\n json_response = self._QueryHashes([self._EICAR_SHA256])\n return json_response is not None", "def test_api_response_data(self):", "def test_get_pricing_with_correct_instrument():\n res = oanda.get_pricing(CONFIG, 'SPX500_USD')\n assert res[0] == 200\n assert isinstance(res[1], dict)\n # we want a price as result\n assert len(res[1]['prices']) > 0", "def test_00_api_get(self):\r\n # GET as Anonymous\r\n url = '/api/'\r\n action = 'get'\r\n self.check_limit(url, action, 'app')", "def test_wallets_get(self):\n pass", "def test_get_object_dict(self):\n review = self.review[0].get_dict()\n self.assertIsNotNone(review['reviewer_id'])\n self.assertIsNotNone(review['book_id'])\n self.assertEqual(5, review['rate'])", "async def test_get_asset_reefer(client):\n params = [('access_token', 'access_token_example'),\n ('start_ms', 56),\n ('end_ms', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/assets/{asset_id}/reefer'.format(asset_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_API(self):\n print(\"Test API ...\")\n t0 = time.time()\n c = 0\n for trip_headsign in TRIP_HEADSIGN:\n for stop in STOP_A:\n payload = {'format': 'json', 'route_id': \"A\", 'trip_headsign': trip_headsign, 'stop_name': stop}\n req = requests.get('https://applications002.brest-metropole.fr/WIPOD01/Transport/REST/getRemainingTimes',params=payload)\n if len(req.text) < 100 : #API answer 189 characters if it works well\n print(\"API not responding for parameters : {}, {} \".format(trip_headsign, stop))\n c += 1\n else :\n print(\"Params : {}, {} : {}\".format(trip_headsign, stop, req.text))\n duration = time.time() - t0\n print(\"END OF TEST : duration : {} s, {} requests failed\".format(duration, c))", "def get_hashrate_info(self, miner, algo):\n\n # Hack for some miners/pools\n if algo == \"daggerhashimoto\":\n algo = \"ethash\"\n\n # build the hashrate per model key\n key = miner.model.model + \".\" + algo.replace(\"-\", \"\")\n\n # get the hashrate info for this model of miner\n hashrate_info = self.hash_rate_per_model.get(key.lower())\n\n if hashrate_info is not None:\n # are there any overrides set? 
If so, use those\n hashrate_override = self.hash_rate_overrides.get(miner.id)\n if hashrate_override is not None:\n hashrate_info['expected_rate'] = hashrate_override\n\n return hashrate_info", "def setUp(self):\n # This is a testing key, and keys are free.\n # Also, the API is rate-limited, so there's very little reason to abuse this.\n self.api_key = '2iG9VxVZJYGKRagpaqdxzhiCdgYbbtlkpfYXdUfa'\n self.eir = openei_rates.OpenEIRates(self.api_key)\n\n self.rate = self.eir.get_rate_by_url('https://openei.org/apps/IURDB/rate/view/5c488ad2b718b378f4caf7ea#1__Basic_Information')", "def test_xblockcompletion_get_data_researcher(self):\n data = {\n 'format':'resumen',\n 'course': str(self.course.id)\n }\n response = self.client_data_researcher.get(reverse('xblockcompletion-data:data'), data)\n request = response.request\n r = json.loads(response._container[0].decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(r['status'], 'El reporte de preguntas esta siendo creado, en un momento estará disponible para descargar.')", "def test_train_house_price_model():\n with app.test_client()as c:\n response = c.get('/REST/api/v1.0/train')\n assert response.status_code == 201", "def test_rate_limited(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPConflict)\n self._check_response(response, 105)", "def test_retrieve_successsfully(self):\n create_sample_recipe(user=self.user)\n create_sample_recipe(user=self.user, title='Snack')\n\n recipes = Recipe.objects.all().order_by('-id')\n serializer = RecipeSerializer(recipes, many=True)\n\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(serializer.data, res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_api_rivers(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load rivers from url specified in api base\n r = requests.get(r['rivers']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('rivers', r)\n self.assertIn('Androscoggin River', r['rivers'][0]['name'])\n r = requests.get(r['next']).json()\n self.assertIn('Wild River', r['rivers'][0]['name'])", "def test_mineral_detail_view(self):\n resp = self.client.get(reverse(\n 'minerals:detail',\n kwargs={'name': self.mineral.url_name}))\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(self.mineral, resp.context['mineral'])", "def test_get_one(self):\n response = self.client.get('/api/v1/parcels/100')\n self.assertEqual(response.status_code, 200)", "def getInfo(self):\n self.info = requests.get(G.api + self.testId + '/snapshots/' + self.hash, auth=(G.username, G.authkey)).json()\n return self.info", "def test_get_product_rate_plan_by_id(self):\n pass", "def test_get(self):\n self.assertEqual(200, self.resp.status_code)", "def test_get(self):\n self.assertEqual(200, self.resp.status_code)", "def test_companies_company_id_data_tax_rates_get(self):\n pass" ]
[ "0.63951224", "0.6310697", "0.6241689", "0.62342453", "0.60122466", "0.59688497", "0.5942244", "0.5845871", "0.5801503", "0.578017", "0.5748117", "0.5727169", "0.57210433", "0.5657565", "0.56426114", "0.56349385", "0.5620731", "0.56199795", "0.5606225", "0.5600434", "0.5599018", "0.55868405", "0.5558119", "0.55354196", "0.5524954", "0.5512352", "0.5510392", "0.5486031", "0.5483743", "0.5475922", "0.54681695", "0.5465359", "0.54614055", "0.5441254", "0.54360867", "0.5419385", "0.54112047", "0.54037756", "0.53984654", "0.539735", "0.53915477", "0.53786635", "0.5360364", "0.5354889", "0.5351451", "0.53466123", "0.53447676", "0.53398454", "0.53392255", "0.5337336", "0.5329047", "0.53260523", "0.5322501", "0.5322479", "0.53079426", "0.5299915", "0.5288887", "0.52853554", "0.528486", "0.5283856", "0.5281256", "0.52779675", "0.5273153", "0.52654654", "0.52546954", "0.525246", "0.52465004", "0.5246359", "0.52412647", "0.5237535", "0.52354467", "0.52351946", "0.5234492", "0.5233212", "0.523227", "0.52316576", "0.52277666", "0.52273715", "0.52273715", "0.5222312", "0.52076644", "0.5204201", "0.520324", "0.5199521", "0.5193864", "0.51919407", "0.5189075", "0.5178942", "0.5178577", "0.5173589", "0.5173447", "0.5172809", "0.5172062", "0.51608473", "0.51579654", "0.5156288", "0.5154397", "0.51518714", "0.51518714", "0.51453894" ]
0.74108505
0
A method that returns the difference between two operands
def calc(operand_1, operand_2): return operand_1 - operand_2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtraction(a, b):\n return a - b", "def subtract(a, b):\n return a - b", "def subtract(a, b):\n return a - b", "def subtract(lhs, rhs):\n return _make.subtract(lhs, rhs)", "def subtract(*args):\n return args[0] - reduce(lambda x, y: x + y, args[1:])", "def minus(self, a, b):\n return a - b", "def calc(operand_a, operand_b):\n return operand_a - operand_b", "def subtraction(x, y):\n return x - y", "def subtract(first, second):\n return first - second", "def subtract(x, y):\n\n return x - y", "def subtract(x, y):\n return x - y", "def subtract(x, y):\n return x - y", "def subtract(x, y):\n return x - y", "def diff(*args):\n return reduce(lambda x, y: x - y, args)", "def substract(x, y):\n return y - x", "def subtract(self, other):\n return self.add(other.neg())", "def subtraction(num_1, num_2):\n return num_1 - num_2", "def subtraction(number1, number2):\n return number1 - number2", "def subtract(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n difference = str(args[0] - args[1])\n return difference", "def subtraction(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a - b", "def diff(self, x1, x2):\n return x2 - x1", "def subtract(num1, num2):\n return num1 - num2", "def subtract(num1, num2):\n return num1 - num2", "def subtract(num1, num2):\n return num1 - num2", "def subtractor(a,b): \n return a-b # i output a value by using the return statement", "def subtraction(num1, num2):\n difference = num1 - num2\n return difference", "def subtract(self):\n return self._do_calc(self.subtracter)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def diff(y, x):\n if y == x: return ONE\n elif not y.args: return ZERO\n else:\n u, op, v = y.args[0], y.op, y.args[-1]\n if op == '+': return diff(u, x) + diff(v, x)\n elif op == '-' and len(args) == 1: return -diff(u, x)\n elif op == '-': return diff(u, x) - diff(v, x)\n elif op == '*': return u * diff(v, x) + v * diff(u, x)\n elif op == '/': return (v*diff(u, x) - u*diff(v, x)) / (v * v)\n elif op == '**' and isnumber(x.op):\n return (v * u ** (v - 1) * diff(u, x))\n elif op == '**': return (v * u ** (v - 1) * diff(u, x)\n + u ** v * Expr('log')(u) * diff(v, x))\n elif op == 'log': return diff(u, x) / u\n else: raise ValueError(\"Unknown op: %s in diff(%s, %s)\" % (op, y, x))", "def subtract(a, b):\n print(\"SUBTRACTING %d - %d\" % (a, b))\n return a - b", "def __rsub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: -1})", "def __sub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: 1})", "def subtract(num1, num2):\n difference = num1 - num2\n return difference", "def reverse_difference():", "def subtract(num1, num2):\n \n difference = num2 - num1\n return difference", "def subtract(x: int, y: int):\n return x - y", "def difference(num1, num2):\n\n # Return the calculated value\n return abs(num1 - num2)", "def subtract(value1, value2):\n return 1 / (1.0 / value1 - 1.0 / value2)", "def sub(self, a, b):\n return a - b", "def subtract(self, m): \n f = m.negate()\n return self.add(f)", "def subtract(*args):\n #convert args to floats so we can do the maths\n values = 
list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n difference = str(ft.reduce(oper.sub,values))\n\n return difference", "def __sub__(self, other):\n return (self.x - other.x, self.y - other.y)", "def difference(self, other): # type: (Term) -> Term\n return self.intersect(other.inverse)", "def calculate_difference(mark1, mark2):\n\n return mark1 - mark2", "def subtraction(self, first_value, second_value):\n return first_value - second_value", "def __sub__(self, other):\n return self.__add__(other.__neg__())", "def sub(o1, o2):\n return o1-o2", "def __sub__(self, other):\n return Difference(self, other)", "def resta(x, y):\n return x - y", "def subtract_numbers(x,y):\n return x - y", "def __sub__(self, other):\n return self._operation_sub(self, other)", "def minusRes(res1, res2):\n return [(x - y) for x, y in zip(res1, res2)]", "def test_diff():\n hs = LocalSpace(\"0\")\n A = OperatorSymbol('A', hs=hs)\n B = OperatorSymbol('B', hs=hs)\n alpha, t = symbols('alpha, t')\n assert Commutator(alpha * t ** 2 * A, t * B).diff(t) == (\n 3 * alpha * t ** 2 * Commutator(A, B)\n )\n assert Commutator.create(alpha * t ** 2 * A, t * B).diff(t) == (\n 3 * alpha * t ** 2 * Commutator(A, B)\n )\n assert Commutator(A, B).diff(t) == ZeroOperator", "def vector_subtract(v1, v2):\n return v1[0] - v2[0], v1[1] - v2[1]", "def __sub__(self, other):\n return self.subtract(other)", "def __sub__(self,that):\n #return self.__opExpand1(that, np.subtract)\n return self.__opExpand2(that,np.subtract)", "def minus(lhs: list, rhs: list) -> list:\n return [lhs[i] - rhs[i] for i in range(len(lhs))]", "def subtract(*args):\n body = ['<h1>Subtraction Calculator</h1>']\n diff = reduce(lambda x,y: x - y, map(int,args))\n body.append(f'Total equals: {diff}')\n return '\\n'.join(body)", "def __sub__(self, other):\n return self + other.__neg__()", "def __sub__(self, other):\n return self + other.__neg__()", "def subtract(self, other):\n warnings.warn(\"`BaseOperator.subtract` method is deprecated, use\"\n \"`op - other` instead\", DeprecationWarning)\n return self._add(-other)", "def __rsub__(self, other):\n return self._operation_sub(other, self)", "def sub(x, y):\n return x - y", "def subtract(*args):\n\n result = int(args[0]) - int(args[1])\n\n return str(result)", "def __sub__(self,other):\n return Vector(self.x - other.x, self.y-other.y)\n pass", "def sub(a, b):\n return a - b", "def sub(a, b):\n return a - b", "def sub(a, b):\n return a - b", "def get_difference(self, other, x, y, norm='L2'):\n norms = {'L2': None, 'Linf': numpy.inf}\n field = self.restrict(x, y)\n other = other.restrict(x, y)\n subtracted = field.subtract(other)\n return numpy.linalg.norm(subtracted.values, ord=norms[norm])", "def diff(self, x1, x2):\n return self.State.diff(x1, x2)", "def difference(self, right: GeoSpatialValue) -> GeoSpatialValue:\n return ops.GeoDifference(self, right).to_expr()", "def vd(v2,v1):\n return v2-v1", "def __rtruediv__(self, other):\n value = -1 / (self.val * self.val)\n total = {self.var: other * value}\n return AutoDiffReverse(other / self.val, None, total)", "def diff(self, x1, x2):\n raise NotImplementedError(\"Not implemented yet.\")", "def __neg__(self):\n return UnaryMinus(self)", "def vars_subtract ( self , var1 , var2 , name = '' , title = '' ) :\n\n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n ##\n res = float ( var1 ) - float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## \n var1 = ROOT.RooRealConstant.value ( var1 ) 
\n return self.vars_subtract ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n\n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Subtraction ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def __sub__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __sub__(self, other):\n return self.__add__(other * -1)", "def difference(self, other):\n return self._geomgen(capi.geom_diff, other)", "def __rsub__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(sub, other)", "def subtraction():\r\n error_handler()\r\n f1.delete(0, END)\r\n s1 = float(operand.get())\r\n s2 = float(operator.get())\r\n result = s1 - s2\r\n f1.insert(10, str(result))", "def diff(x):\n return x[1:] - x[:-1]", "def get_minus_ab(a, b): # IN= 2'int' / OUT= 1'foat'\n return float(a-b)", "def assurance(a, b):\n return a - b", "def symmetric_diff(a,b):\n return a ^ b", "def __sub__(self,other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n resultnumerator = self.numerator-other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues", "def __rmul__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mul, other)", "def sub(a,b):\r\n return a-b", "def test__vector_subtraction__given_two_vectors__return_correct_vector():\n assert Vector((0, 1, 2)) - Vector((3, 4, 5)) == Vector((-3, -3, -3))", "def subtraction(numb1, numb2):\r\n return f\"Your result: {numb1-numb2}\"", "def subtract(self, other, label=None, atol=1.0E-12):\n # check the two solutions share the same grid\n assert numpy.allclose(self.x, other.x, atol=atol)\n assert numpy.allclose(self.y, other.y, atol=atol)\n assert self.values.shape == other.values.shape\n if not label:\n label = self.label + '-subtracted'\n return Field(label=label,\n time_step=self.time_step,\n x=self.x, y=self.y,\n values=self.values - other.values)", "def difference_state(self, a: Vector, b: Vector, u: float, dt: float) -> Vector:\n return vectorops.mul(vectorops.sub(a,b),1.0/dt)", "def __rsub__(self, left):\n return left - self.value()", "def __rsub__(self, other):\n\t\treturn (-self).__add__(float(other))", "def subtractor(a, b): \n print(\"I'm a function. My name is {}\".format(subtractor.__name__))\n print(\"I'm about to subtract {} and {}\\n\\n\".format(a,b))\n return a - b # i output a value by using the return statement", "def subtract(self, other=None, **units):\n if isinstance(other, (datetime, timedelta, relativedelta)):\n return self - other\n\n units = {unit: -units.get(unit, 0) for unit in SHIFT_UNITS}\n\n return self.shift(**units)", "def difference(a, b):\r\n c = [i for i in a + b if i not in a or i not in b]\r\n return c" ]
[ "0.75801355", "0.7548788", "0.7548788", "0.75324667", "0.75270516", "0.74799347", "0.7476643", "0.74425304", "0.7346755", "0.7334118", "0.7311729", "0.7311729", "0.7311729", "0.7274825", "0.71000874", "0.7097167", "0.7027035", "0.70068324", "0.69969356", "0.6982394", "0.6964487", "0.6958316", "0.6922585", "0.6922585", "0.6917717", "0.68958193", "0.68837225", "0.68837225", "0.68837225", "0.68614805", "0.6845702", "0.68362945", "0.6832965", "0.6819893", "0.6747853", "0.67357963", "0.6712847", "0.6697661", "0.66194904", "0.66185147", "0.66117615", "0.6605468", "0.6574893", "0.6562937", "0.6526038", "0.6508588", "0.65011126", "0.649957", "0.6489488", "0.64859974", "0.6440271", "0.643454", "0.64315116", "0.64225626", "0.64224404", "0.6412566", "0.6409559", "0.64053625", "0.6392671", "0.6390705", "0.6390705", "0.63836753", "0.63725483", "0.63629633", "0.63613564", "0.6349808", "0.63369435", "0.63369435", "0.63369435", "0.6331655", "0.6330243", "0.63140666", "0.6308978", "0.6305203", "0.629649", "0.62721294", "0.6267695", "0.6262816", "0.6261896", "0.6261896", "0.62517035", "0.6239771", "0.6231201", "0.62240833", "0.62215555", "0.62151855", "0.62122905", "0.62112856", "0.6198004", "0.61897916", "0.61847854", "0.6184741", "0.617812", "0.61758727", "0.61687136", "0.61574125", "0.6149862", "0.6145657", "0.6130626" ]
0.74922514
6
This function should return a list of two agents that will form the team, initialized using firstIndex and secondIndex as their agent index numbers. isRed is True if the red team is being created, and will be False if the blue team is being created. As a potentially helpful development aid, this function can take additional string-valued keyword arguments ("first" and "second" are such arguments in the case of this function), which will come from the redOpts and blueOpts command-line arguments to capture.py. For the nightly contest, however, your team will be created without any extra arguments, so you should make sure that the default behavior is what you want for the nightly contest.
def createTeam(firstIndex, secondIndex, isRed, first='OffensiveReflexAgent', second='DefensiveReflexAgent'): return [eval(first)(firstIndex), eval(second)(secondIndex)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createTeam(firstIndex, secondIndex, isRed,\n first = 'ReflexCaptureAgent', second = 'DefensiveReflexAgent'):\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'OffensiveReflexAgent', second = 'DefensiveReflexAgent'):\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'OffensiveReflexAgent', second = 'DefensiveReflexAgent'):\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'OffensiveReflexAgent', second = 'DefensiveReflexAgent'):\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed):\n\n firstAgent = ModifiedExpectimaxAgent(firstIndex)\n secondAgent = DefensiveReflexAgent(secondIndex)\n\n return [firstAgent, secondAgent]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'HalfReflexAgent', second = 'HalfReflexAgent'):\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first='MonteCarloTreeSearchCaptureAgent', second='MonteCarloTreeSearchCaptureAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\r\n first = 'DefensiveReflexAgent', second = 'OffensiveReflexAgent'):\r\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'AlternatingAgent', second = 'AlternatingAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'OffensiveReflexAgent', second = 'DefensiveReflexAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'OffensiveReflexAgent', second = 'DefensiveReflexAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'OffensiveReflexAgent', second = 'DefensiveReflexAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\r\n first = 'GeneralAgent', second = 'DefensiveReflexAgent'):\r\n\r\n # The following line is an example only; feel free to change it.\r\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'AtkAgent', second = 'DefAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'DefensiveAgent', second = 'OffensiveAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\r\n first = 'DummyAgent', second = 'DummyAgent'):\r\n\r\n # The following line is an example only; feel free to change it.\r\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'DumbDefensiveAgent', 
second = 'QOffensiveAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'DQNAgent1', second = 'DQNAgent2'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'ProcrastinateAgent', second = 'ProcrastinateAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'danielAgent2', second = 'danielAgent2'):\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'pacai.agents.capture.dummy.DummyAgent',\n second = 'pacai.agents.capture.dummy.DummyAgent'):\n\n # firstAgent = reflection.qualifiedImport(first)\n # secondAgent = reflection.qualifiedImport(second)\n\n firstAgent = OffenseQuantumSlugAgent\n secondAgent = DefenseQuantumSlugAgent\n # secondAgent = OffenseQuantumSlugAgent\n\n return [\n firstAgent(firstIndex),\n secondAgent(secondIndex),\n ]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'FrenchCanadianAgent', second = 'FrenchCanadianAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'TreeAgent', second = 'TreeAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first='UtilityAgent', second='UtilityAgent'):\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first='', second='DefensiveQAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'QCTFAgent', second = 'QCTFAgent'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first='Attacker', second='Defender'):\n\n# The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'Top', second = 'Bottom'):\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def main_make_teams():\n first = []\n second = []\n i = 0\n while i < 3:\n number1team = randint(1, 3)\n if number1team == 1:\n first.append(Swordsman)\n elif number1team == 2:\n first.append(Archer)\n elif number1team == 3:\n first.append(Mage)\n\n number2team = randint(1, 3)\n if number2team == 1:\n second.append(Swordsman)\n elif number2team == 2:\n second.append(Archer)\n elif number2team == 3:\n second.append(Mage)\n i += 1\n return first, second", "def _construct_agents(self, agent_coords, static_map):\n team_blue = []\n team_red = []\n\n Class = {\n TEAM1_UAV : (AerialVehicle, TEAM1_BACKGROUND),\n TEAM2_UAV : (AerialVehicle, TEAM2_BACKGROUND),\n TEAM1_UGV : (GroundVehicle, TEAM1_BACKGROUND),\n TEAM2_UGV : (GroundVehicle, TEAM2_BACKGROUND),\n TEAM1_UGV2: 
(GroundVehicle_Tank, TEAM1_BACKGROUND),\n TEAM2_UGV2: (GroundVehicle_Tank, TEAM2_BACKGROUND),\n TEAM1_UGV3: (GroundVehicle_Scout, TEAM1_BACKGROUND),\n TEAM2_UGV3: (GroundVehicle_Scout, TEAM2_BACKGROUND),\n TEAM1_UGV4: (GroundVehicle_Clocking, TEAM1_BACKGROUND),\n TEAM2_UGV4: (GroundVehicle_Clocking, TEAM2_BACKGROUND),\n }\n\n for element, coords in agent_coords.items():\n if coords is None: continue\n for coord in coords:\n Vehicle, team_id = Class[element]\n cur_ent = Vehicle(coord, static_map, team_id, element)\n if team_id == TEAM1_BACKGROUND:\n team_blue.append(cur_ent)\n elif team_id == TEAM2_BACKGROUND:\n team_red.append(cur_ent)\n\n return team_blue, team_red", "def choose(self):\n # pick agent A\n keys = list(self._agents.keys())\n keyA = random.choice(keys)\n agentA = self.model.schedule.agents[keyA]\n\n # pick pick agent B\n keyB = random.choice(agentA.neighbors)\n agentB = self.model.schedule.agents[keyB]\n\n return agentA, agentB", "def getTeam(self, gameState):\n\n if (self.red):\n return gameState.getRedTeamIndices()\n else:\n return gameState.getBlueTeamIndices()", "def step(self, entities_action=None, cur_suggestions=None):\n\n if self.is_done:\n print('done frame')\n info = {\n 'blue_trajectory': self._blue_trajectory,\n 'red_trajectory': self._red_trajectory,\n 'static_map': self._static_map,\n 'red_reward': 0,\n 'rewards': self._rewards,\n 'saved_board_rgb': self._saved_board_rgb,\n 'saved_blue_obs_rgb': self._saved_blue_obs_rgb,\n 'saved_red_obs_rgb': self._saved_red_obs_rgb,\n 'flag_sandbox': self._FLAG_SANDBOX,\n }\n return self.get_obs_blue, 0, self.is_done, info\n\n self.run_step += 1\n indiv_action_space = len(self.ACTION)\n\n if self.CONTROL_ALL:\n assert self.RED_STEP == 1\n assert entities_action is not None, 'Under CONTROL_ALL setting, action must be specified'\n assert (type(entities_action) is list) or (type(entities_action) is np.ndarray), \\\n 'CONTROLL_ALL setting requires list (or numpy array) type of action'\n assert len(entities_action) == len(self._team_blue+self._team_red), \\\n 'You entered wrong number of moves.'\n\n move_list_blue = entities_action[:len(self._team_blue)]\n move_list_red = entities_action[-len(self._team_red):]\n\n # Move team1\n positions = []\n for idx, act in enumerate(move_list_blue):\n if self.STOCH_TRANSITIONS and self.np_random.rand() < self.STOCH_TRANSITIONS_EPS:\n if self._policy_blue is not None and not self._policy_blue._random_transition_safe:\n act = 0\n else:\n act = self._stoch_transition(self._team_blue[idx].get_loc())\n self._team_blue[idx].move(self.ACTION[act], self._env, self._static_map)\n positions.append((self._team_blue[idx].get_loc(), self._team_blue[idx].isAlive))\n self._blue_trajectory.append(positions)\n\n\n # Move team2\n if self._FLAG_SANDBOX:\n move_list_red = []\n positions = []\n for idx, act in enumerate(move_list_red):\n if self.STOCH_TRANSITIONS and self.np_random.rand() < self.STOCH_TRANSITIONS_EPS:\n if self._policy_red is not None and not self._policy_red._random_transition_safe:\n act = 0\n else:\n act = self._stoch_transition(self._team_red[idx].get_loc())\n self._team_red[idx].move(self.ACTION[act], self._env, self._static_map)\n positions.append((self._team_red[idx].get_loc(), self._team_red[idx].isAlive))\n self._red_trajectory.append(positions)\n\n else:\n # Move team1\n if entities_action is None:\n # Use predefined policy\n try:\n move_list_blue = self._policy_blue.gen_action(self._team_blue, self.get_obs_blue)\n except Exception as e:\n print(\"No valid policy for blue team and no 
actions provided\", e)\n traceback.print_exc()\n exit()\n elif type(entities_action) is int:\n # Action given in Integer\n move_list_blue = []\n if entities_action >= len(self.ACTION) ** len(self._team_blue):\n sys.exit(\"ERROR: You entered too many moves. There are \" + str(len(self._team_blue)) + \" entities.\")\n while len(move_list_blue) < len(self._team_blue):\n move_list_blue.append(entities_action % indiv_action_space)\n entities_action = int(entities_action / indiv_action_space)\n else: \n # Action given in array\n if len(entities_action) != len(self._team_blue):\n sys.exit(\"ERROR: You entered wrong number of moves. There are \" + str(len(self._team_blue)) + \" entities.\")\n move_list_blue = entities_action\n\n positions = []\n for idx, act in enumerate(move_list_blue):\n if self.STOCH_TRANSITIONS and self.np_random.rand() < self.STOCH_TRANSITIONS_EPS:\n if self._policy_blue is not None and not self._policy_blue._random_transition_safe:\n act = 0\n else:\n act = self._stoch_transition(self._team_blue[idx].get_loc())\n self._team_blue[idx].move(self.ACTION[act], self._env, self._static_map)\n positions.append((self._team_blue[idx].get_loc(), self._team_blue[idx].isAlive))\n self._blue_trajectory.append(positions)\n\n # Move team2\n if not self._FLAG_SANDBOX and self.run_step % self.RED_DELAY == 0:\n for _ in range(self.RED_STEP):\n try:\n move_list_red = self._policy_red.gen_action(self._team_red, self.get_obs_red)\n except Exception as e:\n print(\"No valid policy for red team\", e)\n traceback.print_exc()\n exit()\n\n positions = []\n for idx, act in enumerate(move_list_red):\n if self.STOCH_TRANSITIONS and self.np_random.rand() < self.STOCH_TRANSITIONS_EPS:\n if self._policy_red is not None and not self._policy_red._random_transition_safe:\n act = 0\n else:\n act = self._stoch_transition(self._team_red[idx].get_loc())\n self._team_red[idx].move(self.ACTION[act], self._env, self._static_map)\n positions.append((self._team_red[idx].get_loc(), self._team_red[idx].isAlive))\n self._red_trajectory.append(positions)\n\n finish_move=False\n for i in self._team_red:\n if i.isAlive and not i.is_air:\n locx, locy = i.get_loc()\n if self._static_map[locx][locy] == TEAM1_FLAG:\n finish_move=True\n if finish_move: break\n\n self._create_observation_mask()\n \n # Update individual's memory\n if self.INDIV_MEMORY == \"fog\":\n if self.run_step % self.COM_FREQUENCY == 1:\n update = np.logical_or.reduce([agent.memory for agent in self._team_blue])\n for agent in self._team_blue:\n agent.update_memory()\n agent.share_memory(update)\n update = np.logical_or.reduce([agent.memory for agent in self._team_red])\n for agent in self._team_red:\n agent.update_memory()\n agent.share_memory(update)\n else:\n for agent in self._agents:\n agent.update_memory()\n \n # Run interaction\n target_agents, revive_agents = [], []\n for agent in self._agents:\n if agent.isAlive and not agent.is_air:\n target_agents.append(agent)\n else:\n revive_agents.append(agent)\n if self.RESPAWN_AGENT_DEAD:\n agent.revive()\n num_blue_killed = 0\n num_red_killed = 0\n if len(target_agents) > 0:\n new_status = self._interaction(target_agents)\n for idx, entity in enumerate(target_agents):\n if new_status[idx] == False: # Agent is killed during the interaction\n if entity.team == TEAM1_BACKGROUND:\n num_blue_killed += 1\n elif entity.team == TEAM2_BACKGROUND:\n num_red_killed += 1\n # Change status\n for status, entity in zip(new_status, target_agents):\n entity.isAlive = status\n\n # Check win and lose conditions\n blue_point, 
red_point = 0.0, 0.0 \n has_alive_entity = False\n for agent in self._team_red:\n if agent.isAlive and not agent.is_air:\n has_alive_entity = True\n locx, locy = agent.get_loc()\n if self._static_map[locx][locy] == TEAM1_FLAG: # TEAM 1 == BLUE\n self.blue_flag_captured = True\n red_point += 1.0\n if self.RESPAWN_FLAG: # Regenerate flag\n self._static_map[locx][locy] = TEAM1_BACKGROUND\n self._env[locx][locy][2] = 0\n candidate = np.logical_and(self._env[:,:,1]==REPRESENT[TEAM1_BACKGROUND], self._env[:,:,4]!=REPRESENT[TEAM1_UGV])\n coords = np.argwhere(candidate)\n newloc = coords[np.random.choice(len(coords))]\n self._static_map[newloc[0]][newloc[1]] = TEAM1_FLAG\n self._env[newloc[0]][newloc[1]][2] = REPRESENT[TEAM1_FLAG]\n if self.RESPAWN_AGENT_AT_FLAG:\n agent.revive()\n else:\n self.red_win = True\n \n # TODO Change last condition for multi agent model\n if not has_alive_entity and not self._FLAG_SANDBOX and self.mode != \"human_blue\":\n self.blue_win = True\n self.red_eliminated = True\n\n has_alive_entity = False\n for agent in self._team_blue:\n if agent.isAlive and not agent.is_air:\n has_alive_entity = True\n locx, locy = agent.get_loc()\n if self._static_map[locx][locy] == TEAM2_FLAG:\n self.red_flag_captured = True\n blue_point += 1.0\n if self.RESPAWN_FLAG: # Regenerate flag\n self._static_map[locx][locy] = TEAM2_BACKGROUND\n self._env[locx][locy][2] = 0\n candidate = np.logical_and(self._env[:,:,1]==REPRESENT[TEAM2_BACKGROUND], self._env[:,:,4]!=REPRESENT[TEAM2_UGV])\n coords = np.argwhere(candidate)\n newloc = coords[np.random.choice(len(coords))]\n self._static_map[newloc[0]][newloc[1]] = TEAM2_FLAG\n self._env[newloc[0]][newloc[1]][2] = REPRESENT[TEAM2_FLAG]\n if self.RESPAWN_AGENT_AT_FLAG:\n agent.revive()\n else:\n self.blue_win = True\n \n if not has_alive_entity:\n self.red_win = True\n self.blue_eliminated = True\n\n if self.RESPAWN_FLAG:\n if self.run_step >= self.MAX_STEP:\n self.is_done = True\n if self.blue_points > self.red_points:\n self.blue_win = True\n self.red_win = False\n elif self.blue_points < self.red_points:\n self.blue_win = False\n self.red_win = True\n elif self._FLAG_SANDBOX:\n if self.run_step >= self.MAX_STEP:\n self.is_done = True\n self.blue_win = True\n else:\n self.is_done = self.red_win or self.blue_win or self.run_step > self.MAX_STEP\n\n # Calculate Reward\n #reward, red_reward = self._create_reward(num_blue_killed, num_red_killed, mode='instant')\n reward, red_reward = blue_point-red_point, red_point-blue_point\n self.blue_points += blue_point\n self.red_points += red_point\n\n # Debug\n self._rewards.append(reward)\n if self.SAVE_BOARD_RGB:\n self._saved_board_rgb.append(self.get_full_state_rgb)\n if self.SAVE_BLUE_OBS:\n self._saved_blue_obs_rgb.append(self.get_obs_blue_rgb)\n if self.SAVE_RED_OBS:\n self._saved_red_obs_rgb.append(self.get_obs_red_rgb)\n\n # Pass internal info\n info = {\n 'blue_trajectory': self._blue_trajectory,\n 'red_trajectory': self._red_trajectory,\n 'static_map': self._static_map,\n 'red_reward': red_reward,\n 'rewards': self._rewards,\n 'saved_board_rgb': self._saved_board_rgb,\n 'saved_blue_obs_rgb': self._saved_blue_obs_rgb,\n 'saved_red_obs_rgb': self._saved_red_obs_rgb,\n 'flag_sandbox': self._FLAG_SANDBOX,\n }\n\n return self.get_obs_blue, reward, self.is_done, info", "def get_team(self, game_state):\n if self.red:\n return game_state.get_red_team_indices()\n else:\n return game_state.get_blue_team_indices()", "def build_team_two(self):\n print(\"\\nCreate your secon team!\")\n\n\n self.team_two = 
Team(self.usr_input(\"Give your team a name! \"))\n hero1 = Hero(self.usr_input(\"Name your SUPER HERO! \"))\n # hero2 = Hero(self.usr_input(\"Name your second SUPER HERO! \"))\n ability_name1 = self.usr_input(\"What ability does {} have? \".format(hero1.name))\n # ability_name2 = self.usr_input(\"What ability does {} have? \".format(hero2.name))\n ability_lvl1 = self.usr_input(\"What's {} ability level? \".format(hero1.name))\n # ability_lvl2 = self.usr_input(\"What's {}ability level? \".format(hero2.name))\n ability1 = Ability(ability_name1, ability_lvl1)\n # ability2 = Ability(ability_name2, ability_lvl2)\n hero1.add_ability(ability1)\n # hero2.add_ability(ability2)\n weapon1 = Weapon(self.usr_input(\"What weapon {} using? \".format(hero1.name)), random.randint(1, 5) * 10)\n # weapon2 = Weapon(self.usr_input(\"What weapon {} using? \".format(hero2.name)), random.randint(1, 5) * 10)\n hero1.add_ability(weapon1)\n # hero2.add_ability(weapon2)\n self.team_two.add_hero(hero1)\n # self.team_two.add_hero(hero2)", "def create_teams():\n participants = State.get().participants()\n amount = len(participants)\n if amount < 4:\n return None, None\n lst = participants\n for i in range(3):\n shuffle(lst)\n teams = [lst[i:i+2] for i in range(0, amount, 2)]\n\n # Make last team of three persons if not even number.\n if amount % 2 != 0:\n teams[-2].append(teams[-1][0])\n del(teams[-1])\n\n names = TEAM_NAMES\n shuffle(names)\n return teams, names[0:len(teams)]", "def form_agents(n, r, a, b, agents):\n for a_ind, b_ind in izip(a, b):\n #util_fn = random.choice([util.Linear, util.CES, util.CobbDouglas])\n #util_fn = util_fn.rand(p, n)\n #util_fn = util.Linear.rand(n, r, a_ind)\n\n agent = Agent.rand(util.Linear, n, r, a_ind, b_ind)\n agents.add(agent)", "def getTeam(self):\n return [\"The A-Team\", \"some other bloke\"]", "def registerInitialState(self, gameState):\n\n '''\n Make sure you do not delete the following line. 
If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n self.start = gameState.getAgentPosition(self.index)\n CaptureAgent.registerInitialState(self, gameState)\n\n \"G A M E K E Y L O C A T I O N S D E T E R M I N A T I O N\"\n if self.red:\n leftEdge = gameState.data.layout.width / 2\n rightEdge = gameState.data.layout.width - 2 #don't need the last wall\n self.safeColumn = leftEdge - 2 # -1 doesn't always seem to work\n else:\n leftEdge = 1\n rightEdge = gameState.data.layout.width / 2\n self.safeColumn = rightEdge + 2\n\n self.safeSpaces = []\n for h in xrange(1,gameState.data.layout.height-1):\n if not gameState.data.layout.isWall((self.safeColumn, h)):\n self.safeSpaces += [(self.safeColumn, h)]\n\n\n \"S T A T E A S S I G N M E N T\"\n pos = gameState.getAgentState(self.index).getPosition()\n self.friend = min(2 + int(not self.red), 2 - self.index + 2 * int(not self.red))\n friendPos = gameState.getAgentState(self.friend).getPosition()\n opps = [gameState.getAgentState(el).getPosition() for el in [1 - int(not self.red), 3 - int(not self.red)] ]\n\n print \"I am agent\", self.index, \"at position \", pos\n #print \"agent 0:\", gameState.getAgentState(0).getPosition()\n print \"My friend agent\", self.friend, \"is at position \", friendPos\n print \"My first enemy agent is at position \", opps[0]\n print \"My second enemy agent is at position \", opps[1]\n\n self.top = False\n self.undecided = False\n\n if pos[1] > friendPos[1]:\n print \"My friend is lower on the map, and I will take top Quad\"\n self.top = True\n elif pos[1] < friendPos[1]:\n print \"My friend is higher on the map, and I will take bottom Quad\"\n else:\n self.undecided = True\n\n \"F O O D A S S I G N M E N T\"\n self.initFood = self.getFood(gameState).asList()\n self.myFood = self.initFood[:] #this is will be updated during our A* Search for theoretical consumption\n print self.myFood\n\n \"I N I T I A L F O O D A S S I G N M E N T S \"\n\n start = time.time()\n print 'eval time for moves: %.4f' % (time.time() - start)\n\n\n \"D E B U G G I N G\"\n print \"Coloring my safe column white\"\n self.debugDraw([(self.safeColumn, el) for el in xrange(0, gameState.data.layout.height)], [1,1,1], clear=False)\n\n print \"Coloring my safe spaces\", self.safeSpaces, \"blue\"\n self.debugDraw(self.safeSpaces, [0,0,1], clear=False)\n\n self.counter = 0\n self.moves = []\n self.intendedCoords =[]\n self.best = None\n\n #new\n print \"Using my sweet time to find next moves during init as agent\", self.index\n self.best = self.ActionLoop(gameState, 140)\n self.moves = self.best.getDir()[1]\n self.counter = len(self.moves)\n self.cacheSize = len(self.moves)\n #new", "def get_teams():", "def get_red():\n # return name of actor, movement speed\n zombies = ['Zombie-1','Zombie-2','Zombie-3']\n return choice(zombies), randint(1,4)", "def create_agents() -> List[InsuranceAgent]:\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(\n personal_info={\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n },\n call_acceptance_criteria=[\n {\n \"person_attribute\": AGE,\n \"comparison_operator\": 
random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=120,\n ),\n },\n {\n \"person_attribute\": INCOME,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=1000000,\n ),\n },\n {\n \"person_attribute\": KIDS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": CARS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": INSURANCE_OPERATION,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": random.choice((RENT, BUY)),\n },\n ],\n )\n agents.append(insurance_agent)\n return agents", "def multi_agent_example():\n env = holodeck.make(\"CyberPunkCity-FollowSight\")\n\n cmd0 = np.array([0, 0, -2, 10])\n cmd1 = np.array([0, 0, 0])\n for i in range(10):\n env.reset()\n env.tick()\n env.act(\"uav0\", cmd0)\n env.act(\"nav0\", cmd1)\n for _ in range(1000):\n states = env.tick()\n pixels = states[\"uav0\"][\"RGBCamera\"]", "def test_2_agents_done_at_once(self):\n # Test highest rating agent\n agent, wait_time = Agent.get(TEST_CUSTOMERS[0])\n self.assertEqual(\n (TEST_AGENTS[1], 0), (agent.agent, wait_time))\n # Test 2 agents done at the same time\n Agent.get(TEST_CUSTOMERS[1])\n agent, wait_time = Agent.get(TEST_CUSTOMERS[2])\n self.assertEqual(\n (TEST_AGENTS[1], 60), (agent.agent, wait_time))", "def test_handle_create_multiple_team_lookup_error(self):\r\n team1 = Team(\"GTID1\", \"team-name1\", \"name1\")\r\n team2 = Team(\"GTID2\", \"team-name2\", \"name2\")\r\n team1.team_leads.add(user)\r\n team2.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team1, team2]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project create repo-link team-name\",\r\n user),\r\n (\"2 teams found with GitHub team name team-name\", 200))", "def generate_goals(num_goals: int) -> List[Goal]:\r\n type_gen = random.randint(0, 1) # Perimeter or Blob\r\n return_list = []\r\n copy_colour = []\r\n for color in COLOUR_LIST:\r\n copy_colour.append(color)\r\n if type_gen == 0: # Perimeter\r\n i = 0\r\n while i < num_goals:\r\n color_gen = random.randint(0, len(copy_colour) - 1)\r\n return_list.append(PerimeterGoal(copy_colour[color_gen]))\r\n copy_colour.remove(copy_colour[color_gen])\r\n i += 1\r\n return return_list\r\n for i in range(num_goals):\r\n color_gen = random.randint(0, len(copy_colour) - 1)\r\n return_list.append(BlobGoal(copy_colour[color_gen]))\r\n copy_colour.remove(copy_colour[color_gen])\r\n return return_list", "def generate_goals(num_goals: int) -> List[Goal]:\r\n goal_lst = []\r\n colour_lst = []\r\n\r\n num = len(COLOUR_LIST) - 1\r\n rand = random.randint(0, 1)\r\n\r\n if rand == 1:\r\n while len(goal_lst) < num_goals:\r\n new = COLOUR_LIST[random.randint(0, num)]\r\n if not colour_lst.__contains__(new):\r\n colour_lst.append(new)\r\n goal_lst.append(PerimeterGoal(new))\r\n while len(goal_lst) < num_goals:\r\n x = COLOUR_LIST[random.randint(0, num)]\r\n if not colour_lst.__contains__(x):\r\n colour_lst.append(x)\r\n goal_lst.append(BlobGoal(x))\r\n return goal_lst", "def create_double_round_robin(teams) -> list:\n round_robin = []\n for team1 in teams:\n for team2 in teams:\n if team1 == team2:\n continue\n round_robin.append([team1, team2])\n return round_robin", "def generate_goals(num_goals: int) -> List[Goal]:\r\n goals = []\r\n colour_choices = []\r\n while len(colour_choices) != num_goals:\r\n 
index = random.randint(0, len(COLOUR_LIST)-1)\r\n colour = COLOUR_LIST[index]\r\n if colour not in colour_choices:\r\n colour_choices.append(colour)\r\n goal_index = random.randint(0, 2)\r\n if goal_index == 0:\r\n for c in colour_choices:\r\n goals.append(PerimeterGoal(c))\r\n assert len(goals) == num_goals\r\n return goals\r\n else:\r\n for c in colour_choices:\r\n goals.append(BlobGoal(c))\r\n assert len(goals) == num_goals\r\n return goals", "def team_battle(self):\n # deaths1 = 0\n # deaths2 = 0\n #\n # while deaths1 < len(self.team_one.heroes) and deaths2 < len(self.team_two.heroes):\n # self.team_one.attack(self.team_two)\n # self.team_two.attack(self.team_one)\n # if\n # print(\"hi\")\n while self.team_one.find_hero == True and self.team_two.find_hero == True:\n print(\"team_battle\")", "def get_actors(\n world: carla.World, # pylint: disable=no-member\n spawn_point: Optional[Union[int, carla.Location]], # pylint: disable=no-member\n num_vehicles: int,\n num_pedestrians: int,\n) -> Tuple[carla.Vehicle, Sequence[Optional[carla.Vehicle]], # pylint: disable=no-member\n Sequence[Optional[carla.Walker]]]: # pylint: disable=no-member\n # HERO agent.\n _spawn_point = get_spawn_point(world, spawn_point)\n hero = spawn_hero(\n world=world,\n spawn_point=_spawn_point,\n vehicle_id=\"vehicle.ford.mustang\",\n )\n # Other vehicles.\n vehicles = spawn_vehicles(\n world=world,\n num_vehicles=num_vehicles,\n )\n # Other pedestrians.\n pedestrians = spawn_pedestrians(\n world=world,\n num_pedestrians=num_pedestrians,\n )\n return hero, vehicles, pedestrians", "def create_game(agent, other_agent, games_counter, verbose_mode,\n from_db=False, cards_in_hand=13):\n if from_db:\n pass\n # todo(maryna): create single game from db. pay attention to players\n # initialization + the iterator.\n trick_counter = [0, 0, ] # [Team 0, Team 1]\n previous_tricks = []\n game = Game(agent, other_agent, games_counter, trick_counter, verbose_mode,\n previous_tricks, Trick({}), cards_in_hand=cards_in_hand)\n return game", "def get_blue():\n # return name of actor, grazing speed, self defense\n return 'Piggy', 2", "def compare_agents(EnvFactory, AgentFactories, n = 10, steps = 1000):\n envs = [ EnvFactory() for i in range(n) ]\n return [ (A, \n (A, steps, copy.deepcopy(envs))) for A in AgentFactories ]", "def start_tournament(self):\n for i in range(0, len(self.agents)):\n for j in range(i+1, len(self.agents)):\n p1, p2 = self.agents[i], self.agents[j]\n p1_total_win = 0\n p2_total_win = 0\n for game_num in range(self.G):\n p1_wins, p2_wins, actions = self.play_game(p1, p2)\n p1_total_win += p1_wins\n p2_total_win += p2_wins\n print(p1.name + \": \" + str(p1_total_win) + \" wins, \" + p2.name + \": \" + str(p2_total_win) + \" wins\")\n if self.topp_visualization:\n p1_num = p1.filename.split(\"ep_\")[1].split(\".h5\")[0]\n p2_num = p2.filename.split(\"ep_\")[1].split(\".h5\")[0]\n os.chdir(ROOT_DIR)\n self.visualizer.visualize(actions, p1_num + \"_\" + p2_num)\n self.print_result()", "def get_two_armies(self) -> tuple:\n\n if(len(self.armies) < 2):\n print(\"Could not choose an army. 
must have more than one army on the list\")\n raise Exception\n\n while(True):\n first = R.randint(0, len(self.armies)-1)\n second = R.randint(0, len(self.armies)-1)\n\n if(first != second):\n break\n\n return (self.armies[first], self.armies[second])", "def sense(self, agents, agent_index, top_down_map=None):\n host_agent = agents[agent_index]\n other_agent_dists = {}\n sorted_pairs = sorted(other_agent_dists.items(),\n key=operator.itemgetter(1))\n\n sorting_criteria = []\n for i, other_agent in enumerate(agents):\n if other_agent.id == host_agent.id:\n continue\n # project other elements onto the new reference frame\n rel_pos_to_other_global_frame = other_agent.pos_global_frame - \\\n host_agent.pos_global_frame\n p_parallel_ego_frame = np.dot(rel_pos_to_other_global_frame, host_agent.ref_prll)\n p_orthog_ego_frame = np.dot(rel_pos_to_other_global_frame, host_agent.ref_orth)\n dist_between_agent_centers = vec2_l2_norm(rel_pos_to_other_global_frame)\n dist_2_other = dist_between_agent_centers - host_agent.radius - other_agent.radius\n combined_radius = host_agent.radius + other_agent.radius\n\n if dist_between_agent_centers > Config.SENSING_HORIZON:\n # print(\"Agent too far away\")\n continue\n\n if self.agent_sorting_method != \"time_to_impact\":\n time_to_impact = None\n else:\n time_to_impact = compute_time_to_impact(host_agent.pos_global_frame,\n other_agent.pos_global_frame,\n host_agent.vel_global_frame,\n other_agent.vel_global_frame,\n combined_radius)\n\n sorting_criteria.append([i, round(dist_2_other,2), p_orthog_ego_frame, time_to_impact])\n\n clipped_sorted_inds = self.get_clipped_sorted_inds(sorting_criteria)\n clipped_sorted_agents = [agents[i] for i in clipped_sorted_inds]\n\n other_agents_states = np.zeros((Config.MAX_NUM_OTHER_AGENTS_OBSERVED, 7))\n other_agent_count = 0\n for other_agent in clipped_sorted_agents:\n if other_agent.id == host_agent.id:\n continue\n # project other elements onto the new reference frame\n rel_pos_to_other_global_frame = other_agent.pos_global_frame - \\\n host_agent.pos_global_frame\n p_parallel_ego_frame = np.dot(rel_pos_to_other_global_frame,\n host_agent.ref_prll)\n p_orthog_ego_frame = np.dot(rel_pos_to_other_global_frame,\n host_agent.ref_orth)\n v_parallel_ego_frame = np.dot(other_agent.vel_global_frame,\n host_agent.ref_prll)\n v_orthog_ego_frame = np.dot(other_agent.vel_global_frame,\n host_agent.ref_orth)\n dist_2_other = np.linalg.norm(rel_pos_to_other_global_frame) - \\\n host_agent.radius - other_agent.radius\n combined_radius = host_agent.radius + other_agent.radius\n\n other_obs = np.array([p_parallel_ego_frame,\n p_orthog_ego_frame,\n v_parallel_ego_frame,\n v_orthog_ego_frame,\n other_agent.radius,\n combined_radius,\n dist_2_other])\n \n if other_agent_count == 0:\n host_agent.other_agent_states[:] = other_obs\n\n other_agents_states[other_agent_count,:] = other_obs\n other_agent_count += 1\n\n host_agent.num_other_agents_observed = other_agent_count\n\n return other_agents_states", "def main():\n num_matches = 1000\n matchlist = [\n [ RandomAI, RandomAI ],\n #[ VectorAI, RandomAI ],\n #[ VectorAI, VectorAI ],\n #[ LeftmostAI, RandomAI ],\n #[ LeftmostAI, VectorAI ],\n #[ LeftmostAI, LeftmostAI ],\n\t\t#[ RightmostAI, RandomAI ],\n #[ RightmostAI, VectorAI ],\n #[ RightmostAI, LeftmostAI ],\n #[ RightmostAI, RightmostAI ]\n ]\n\n x=0\n while x < len(matchlist):\n \n print (\"Running \" + str(num_matches) + \" matches: \" + matchlist[x][0].__name__ + \" vs \" + matchlist[x][1].__name__)\n counter = 0 \n \n while counter < 
num_matches:\n counter = counter + 1\n match = Match(player1_type=matchlist[x][0], player2_type=matchlist[x][1], param_print_game_status=False, param_matchgroup=x)\n match.handle_next_move()\n\n if matchlist[x][0] != matchlist[x][1]:\n print (\"Running \" + str(num_matches) + \" matches: \" + matchlist[x][1].__name__ + \" vs \" + matchlist[x][0].__name__)\n counter = 0\n\n while counter < num_matches:\n counter = counter + 1\n match = Match(player1_type=matchlist[x][1], player2_type=matchlist[x][0], param_print_game_status=False, param_matchgroup=x)\n match.handle_next_move()\n x = x+1\n \n print (\"Matches Finished\")", "def multi_start_and_goal_given(self):\n return Grid2DProblem(self.space, set(self.starts), set(self.goals))", "def startAlphaBeta(self):\n start = time.time()\n \n alpha = float(\"-inf\")\n beta = float(\"inf\")\n \n is_max_turn = self.__state.get_max_turn()\n childList = self.__state.get_successors()\n \n choice = (None,float(\"-inf\")) if is_max_turn else (None,float(\"inf\"))\n \n if(len(childList) == 1):\n choice = (childList[0],childList[0].get_utility_value())\n else:\n for c in childList:\n val = self.alphaBeta(c,alpha,beta)\n if is_max_turn:\n if ai_config.Config.AVOID_TIE and c.check_path():\n val = val + (-1 - val)/2\n if val > choice[1]:\n choice = (c,val)\n alpha = val\n else:\n if ai_config.Config.AVOID_TIE and c.check_path():\n val = val + (1 - val)/2\n if val < choice[1]:\n choice = (c,val)\n beta = val \n \n self.__num_explored = len(self.__explored.keys())\n self.__explored.clear()\n \n end = time.time()\n \n self.__time_elapsed = end-start\n \n print(\"Utility: \"+\"{0:.3f}\".format(choice[1]))\n print(\"Nodes Explored: \"+str(self.__num_explored))\n print(\"Time Elapsed: \"+\"{0:.3f} seconds\".format(self.__time_elapsed))\n \n return choice[0]", "async def simulate_even_draw(teams):\n half_len = int(len(teams)/2)\n arr1 = [i for i in range(half_len)]\n arr2 = [i for i in range(half_len, len(teams))][::-1]\n matches = []\n for i in range(len(teams)-1):\n arr1.insert(1, arr2.pop(0))\n arr2.append(arr1.pop())\n for a, b in zip(arr1, arr2):\n matches.append((teams[a], teams[b]))\n return matches", "def matchbetween(self):\n team1_toss_factor, team2_toss_factor = self.toss_factor()\n\n avgScoredByTeam1 = self.team1.attack / self.team2.defense * team1_toss_factor\n avgScoredByTeam2 = self.team2.attack / self.team1.defense * team2_toss_factor\n\n\n while True:\n self.team1score = np.random.poisson(avgScoredByTeam1)\n self.team2score = np.random.poisson(avgScoredByTeam2)\n if self.team1score > self.team2score:\n self.team1.points += 3\n self.team1.won += 1\n self.team2.lost += 1\n self.winner = self.team1\n break\n elif self.team1score < self.team2score:\n self.team2.points += 3\n self.team2.won += 1\n self.team1.lost += 1\n self.winner = self.team2\n break\n else:\n if self.groupcheck is True:\n self.team1.points += 1\n self.team2.points += 1\n self.team1.tie += 1\n self.team2.tie += 1\n break\n self.team1.scored += self.team1score\n self.team2.scored += self.team2score\n self.team1.conceded += self.team2score\n self.team2.conceded += self.team1score\n self.team1.goaldifference += self.team1score-self.team2score\n self.team2.goaldifference += self.team2score-self.team1score", "def parse_args():\n parser = ArgumentParser()\n parser.add_argument('--agent1', required=True)\n parser.add_argument('--agent2', required=True)\n parser.add_argument('--num_games', type=int, default=100)\n parser.add_argument('--cards_in_hand', type=int, default=13)\n 
parser.add_argument('--verbose_mode', type=int, default=1)\n parser.add_argument('--seed', type=int, default=-1)\n\n return parser.parse_args()", "def initializeTeam(self):\n\n # Create two new Learners with different atomic actions\n a1 = randint(0, Trainer.ATOMIC_ACTION_RANGE)\n a2 = randint(0, Trainer.ATOMIC_ACTION_RANGE)\n while a1 == a2:\n a2 = randint(0, Trainer.ATOMIC_ACTION_RANGE)\n\n l1 = Learner(action = a1)\n l2 = Learner(action = a2)\n\n # Create new Team\n team = Team()\n\n # Add Learners to Team\n team.addLearner(l1)\n team.addLearner(l2)\n\n # Add Learners to Learner population\n self.learner_pop.append(l1)\n self.learner_pop.append(l2)\n\n # Add Team to Team populations. Note that all new Teams are, by\n # definition, root teams\n self.team_pop.append(team)", "def build_team_one(self):\n #name of team\n #name of heroes\n #name of ability\n print(\"Lets create your team!\")\n\n self.team_one = Team(self.usr_input(\"Give your team a name! \"))\n hero1 = Hero(self.usr_input(\"Name your SUPER HERO! \"))\n # hero2 = Hero(self.usr_input(\"Name your second SUPER HERO! \"))\n ability_name1 = self.usr_input(\"What ability does {} have? \".format(hero1.name))\n # ability_name2 = self.usr_input(\"What ability does {} have? \".format(hero2.name))\n ability_lvl1 = self.usr_input(\"What's {} ability level? \".format(hero1.name))\n # ability_lvl2 = self.usr_input(\"What's {} ability level? \".format(hero2.name))\n ability1 = Ability(ability_name1, ability_lvl1)\n # ability2 = Ability(ability_name2, ability_lvl2)\n hero1.add_ability(ability1)\n # hero2.add_ability(int(ability2))\n weapon1 = Weapon(self.usr_input(\"What weapon {} using? \".format(hero1.name)), random.randint(1, 5) * 10)\n # weapon2 = Weapon(self.usr_input(\"What weapon {} using? \".format(hero2.name)), random.randint(1, 5) * 10)\n hero1.add_ability(weapon1)\n # hero2.add_ability(weapon2)\n self.team_one.add_hero(hero1)\n # self.team_one.add_hero(hero2)", "def add_agents_and_exit(board, regions, agents, agent_types):\n agent_vals = []\n point_tables = []\n agent_names = []\n agent_types = {'default': DEFAULT_AGENT, **agent_types}\n for agent_type in _fix_random_values(agents):\n agent_type = _fix_random_values(agent_type)\n if agent_type not in agent_types:\n continue\n agent = {**DEFAULT_AGENT, **agent_types[agent_type]}\n agent_val = CellTypes.agent | CellTypes.frozen\n if agent['color'] in COLORS:\n agent_val |= COLORS[agent['color']]\n else:\n logger.error(\"Invalid agent color: '%s'\", agent['color'])\n for flag in agent['flags']:\n if flag in AGENT_PROPERTIES:\n agent_val |= AGENT_PROPERTIES[flag]\n else:\n logger.error(\"Invalid agent property '%s'\", flag)\n agent_vals.append(agent_val)\n point_tables.append(agent['points_table'])\n agent_names.append(agent_type)\n\n if not agent_vals:\n return np.zeros((0,2), dtype=int), np.zeros((0,8,9), dtype=int)\n\n # Add agents to the board\n zero_reg = (regions == 0)\n zero_idx = np.array(np.nonzero(zero_reg)).T\n # ensure that there are not more agents than places to put them:\n agent_vals = agent_vals[:len(zero_idx)]\n agent_locs = zero_idx[\n get_rng().choice(len(zero_idx), len(agent_vals), replace=False)]\n board[tuple(agent_locs.T)] = agent_vals\n\n # Find the location that's as far away from agents as possible while still\n # in the buffer region.\n row_dist = np.abs(np.arange(board.shape[0])[:, np.newaxis] - agent_locs[:,0])\n col_dist = np.abs(np.arange(board.shape[1])[:, np.newaxis] - agent_locs[:,1])\n row_dist = np.sum(np.minimum(row_dist, board.shape[0] - 
row_dist), axis=-1)\n col_dist = np.sum(np.minimum(col_dist, board.shape[1] - col_dist), axis=-1)\n dist = (row_dist[:, np.newaxis] + col_dist[np.newaxis, :]) * zero_reg\n k = np.argmax(dist)\n exit_loc = k // board.shape[1], k % board.shape[1]\n board[exit_loc] = CellTypes.level_exit | CellTypes.color_r\n\n # Ensure that the player and exit aren't touching any other region\n all_locs = np.append(agent_locs, [exit_loc], axis=0)\n n = np.array([[-1,0,1,-1,0,1,-1,0,1],[-1,-1,-1,0,0,0,1,1,1]]).T\n new_locs = (all_locs[:,np.newaxis] + n).reshape(-1, 2) % board.shape\n regions[tuple(new_locs.T)] = -1\n\n return agent_locs, point_tables, agent_names", "def get_people(team):", "async def generate_captains(self,team_a_channel, team_b_channel):\r\n if len(self.remaining) != 10:\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"Please use the command !new and ensure you have 10 players in the channel before selecting captains\")\r\n potential = []\r\n check_prev = self.previous_time and (datetime.now() - self.previous_time).seconds / SECS_TO_HOURS <= TIME_THRESOLD #seconds to hours conversion\r\n for p in self.remaining:\r\n was_captain = p in self.previous_captains.values()\r\n was_in_previous = not check_prev or (check_prev and p in self.previous_players)\r\n blacklisted = get_member_name(p) in self.blacklist\r\n if not was_captain and was_in_previous and not blacklisted:\r\n potential.append(p)\r\n\r\n caps = random.sample(potential, 2) # 2 captains\r\n\r\n for i,team in enumerate(self.captains.keys()):\r\n await self.set_captain(caps[i],team)\r\n\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"The captains are @{} (1st pick) and @{} (2nd pick)\".format(get_member_name(caps[0],lower=False),get_member_name(caps[1],lower=False)))", "def process_team(this_team, nodes, file_obj_out):\n for i, player_one in enumerate(this_team):\n for j, player_two in enumerate(this_team):\n if j > i and nodes[player_one] != nodes[player_two]:\n # write the source id and target id to file\n print(nodes[player_one], nodes[player_two],\n player_one + \" - \" + player_two,\n sep=',', file=file_obj_out)", "def agents_at_goal(self):\r\n return self.searchenv.conv.state_to_tile(self.searchstate.positions) == self.searchenv.goal_tile", "def multi_goal_given(self):\n goals = set(self.goals)\n for start in self.starts:\n yield Grid2DProblem(self.space, set([start]), goals)", "def initialise_cup_competition(bye_list=[]):\n # create a list of all teams\n teams = list(Team.active_objects.all())\n\n # if no. of teams + no. 
of byes is less than 16 then one or more random byes are required\n random_byes = 16 - len(teams) - len(bye_list)\n\n # if it's more than then there's a problem\n # TODO - raise an error here\n if random_byes < 0:\n print(len(teams), len(bye_list))\n print(\"Error: teams + byes > 16\")\n exit\n\n # add a None for each random bye to be incorporated into the random list\n for i in range(random_byes):\n print(\" adding random bye\")\n teams.append(None)\n\n # randomise the list\n random.shuffle(teams)\n\n print(\"randomised teams:\")\n print(teams)\n\n # flag for monitoring when planned byes are needed\n bye_next = False\n # flag for monitoring byes assigned in previous step\n bye_previous = False\n\n for position in range(1, 17):\n\n # pass over the position when a bye is required\n if bye_next:\n bye_next = False\n continue\n\n # get the next team from the randomised list\n team = teams.pop()\n\n # pass over any Nones in the list and set the previous flag\n if not team and not bye_previous:\n print(\"if not team and not bye_previous\")\n bye_previous = True\n continue\n elif not team:\n print(\"elif not team\")\n # add the team/None back to the list and reshuffle until the next one is note none\n teams.append(team)\n while teams[-1] is not None:\n print(\"Two Nones in a row - shuffling\")\n random.shuffle(teams)\n team = teams.pop()\n\n # deal with planned byes\n if team in bye_list:\n # if the position is an even number we can only assign the position if the bye_previous flag is set\n if position % 2 == 0:\n if bye_previous:\n team.cup_start_position = position\n # in which case we need to add the a bye back to the list to replace the random one\n teams.append(None)\n else:\n # otherwise we need to re-add the team back to the end of the list and pop the next non-bye team\n index = -1\n while len(teams) + index >= 0:\n print(\"executing while loop: index = \", index)\n if teams[index] is not None and teams[index] not in bye_list:\n old_team = team\n team = teams.pop(index)\n teams.append(old_team)\n break\n index -= 1\n team.cup_start_position = position\n else:\n team.cup_start_position = position\n bye_next = True\n else:\n # simply assign the team to the current position\n team.cup_start_position = position\n\n # finally save the team and set previous flag back to False\n team.save()\n bye_previous = False", "def get_lights(bridge):\n\n target_names = [\n \"Console Lamp\",\n \"Bedroom Table Lamp\",\n \"Kitchen light\",\n ]\n\n targets = [light for light in bridge.lights if light.name in target_names]\n\n if len(targets) != len(target_names):\n print(\"%s: not found ... 
%s\" % (target_names, targets))\n exit(1)\n\n return targets", "def test(self, gamesNum = 100, adversary = None, advDetector = None, render = False, verbose = True, videoPath = None):\n recordVideo = videoPath is not None\n if recordVideo:\n recorder = VideoRecorder(self.env, videoPath)\n\n gameRewards = []\n gameLengths = []\n attacksNumbers = []\n for i in range(gamesNum):\n done = False\n s = utils.preprocess(self.env.reset())\n frames = np.expand_dims(np.repeat(s, 4, 2), 0)\n gameReward = 0.0\n gameLength = 0\n attNum = 0\n while not done:\n actionScores, actionProbs = self.sess.run([self.logits, self.probs], feed_dict={self.inputs:frames})\n isAdvState, advFrames = self._attack(adversary, frames, actionProbs)\n if advDetector is not None:\n advDetector.isAdv(advFrames, isAdvState)\n attNum += isAdvState\n\n for j in range(self.frameSkip):\n sj, r, done, _ = self.env.step(np.argmax(actionScores))\n gameReward += r\n gameLength += 1\n if render:\n self.env.render()\n if recordVideo:\n recorder.capture_frame()\n\n frames = utils.pushframe(frames, utils.preprocess(sj))\n\n gameRewards.append(gameReward)\n gameLengths.append(gameLength)\n attacksNumbers.append(attNum)\n if verbose:\n print(\"Finished test game \" + str(i+1) + \" / \" + str(gamesNum) + \" reward = \" + str(gameReward))\n print('{\"metric\": \"loss\", \"value\":' + str(gameReward) + '}')\n\n print(\"Agent achieved average reward of \" + str(np.mean(gameRewards)) + \" in \" + str(gamesNum) + \" games.\")\n print('{\"metric\": \"loss\", \"value\":' + str(np.mean(gameRewards)) + '}')\n if recordVideo:\n recorder.close()\n\n return gameRewards, gameLengths, attacksNumbers, advDetector", "def run_match(bot1, bot2, seed):\n\n runner = Runner(player_files=(bot1,bot2), options=Options(quiet=4, game_seed=seed))\n scores0, scores1 = runner.run()[0]\n return [scores0, scores1]", "def getOpponents(self, gameState):\n\n if self.red:\n return gameState.getBlueTeamIndices()\n else:\n return gameState.getRedTeamIndices()", "def describe_agents(agentIds=None, filters=None, maxResults=None, nextToken=None):\n pass", "def switch_colors(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1][0]\n mutated_genome[index1][0] = mutated_genome[index2][0]\n mutated_genome[index2][0] = temp", "def generate_images(video_path, index_first, index_second):\n cap = cv2.VideoCapture(video_path)\n cap.set(cv2.CAP_PROP_POS_FRAMES, index_first)\n success, img = cap.read()\n cv2.imwrite(os.path.join(data_folder, 'demo_single_first.png'), img)\n cap.set(cv2.CAP_PROP_POS_FRAMES, index_second)\n success, img = cap.read()\n cv2.imwrite(os.path.join(data_folder, 'demo_single_second.png'), img)", "def registerInitialState(self, gameState):\n\n ''' \n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py. 
\n '''\n CaptureAgent.registerInitialState(self, gameState)\n ''' \n Your initialization code goes here, if you need any.\n '''\n\n # Store team and enemy indices\n self.teamIndices = self.getTeam(gameState)\n self.enemyIndices = self.getOpponents(gameState)\n\n # Check how recently we were near the enemy to check if we've knocked him out\n self.nearEnemyCounter = 0\n\n # Set up particle filters to track enemy locations\n self.enemyLocFilters = {}\n for i in self.enemyIndices:\n self.enemyLocFilters[i] = (ParticleFilter(gameState, i,\n gameState.getInitialAgentPosition(i)))", "def flip_agent(self, mutable_agent, immutable_agent, flip='random'):\n if flip == 'random':\n flip = int(np.random.random() < 0.5)\n if flip == 0:\n # in the first flip mutable agent is agent 1 and immutable agent\n # is agent 2.\n return mutable_agent, immutable_agent, flip\n else:\n # in the second flip mutable agent is agent 2 and immutable agent\n # is agent 1.\n return immutable_agent, mutable_agent, flip", "async def simulate_odd_draw(teams):\n half_len = int((len(teams)+1)/2)\n arr1 = [i for i in range(half_len)]\n arr2 = [i for i in range(half_len, len(teams)+1)][::-1]\n matches = []\n for i in range(len(teams)):\n arr1.insert(1, arr2.pop(0))\n arr2.append(arr1.pop())\n for a, b in zip(arr1, arr2):\n if len(teams) not in (a, b):\n matches.append((teams[a], teams[b]))\n return matches", "def get_teams_attending_two_events(event1, event2):\n teams_at_both = []\n teams1 = get_teams(event1)\n teams2 = get_teams(event2)\n for team1 in teams1:\n if team1 in teams2:\n teams_at_both.append(team1)\n return teams_at_both", "def test_mcts_agent(self):\n logging.info(\"Starting test_mcts_agent\")\n dirname = os.path.dirname(__file__)\n filename = os.path.join(dirname, \"../configs/factory_floor_simple.yaml\")\n parameters = getParameters(filename)\n env = FactoryFloor(parameters)\n obs = env.reset()\n\n mctsAgents = []\n\n randomagent = 'aiagents.single.RandomAgent.RandomAgent'\n for robotId in env.action_space.spaces.keys():\n mctsparams = {'treeAgent':{'class': randomagent, 'id':robotId, 'parameters':{} },\n 'rolloutAgent':{'class': randomagent, 'id':robotId, 'parameters':{} }} \n mctsparams['simulator'] = dict(parameters)\n mctsparams['simulator']['fullname'] = \"aienvs.FactoryFloor.FactoryFloor.FactoryFloor\"\n \n mctsAgents.append(MctsAgent(robotId, env.action_space, env.observation_space , mctsparams))\n\n complexAgent = BasicComplexAgent(mctsAgents, env.action_space, env.observation_space)\n\n episode = Episode(complexAgent, env, obs, render=True)\n episode.run()", "def split_red_blue(spec):\n\n redmetals, bluemetals = [], []\n for item in spec:\n for ab in item.metal.absorbers:\n if ab.ionName != 'm1' and ab.b > 0.317: # through out really small b values. 
.317 chosen b/c 0.316 is a default small value in dude\n if DumpData.is_red(ab, item):\n redmetals.append(ab)\n else:\n bluemetals.append(ab)\n return bluemetals, redmetals", "def test_hist_colors(candidate, expected1, expected2):\n\n assert hist_colors(candidate) == (expected1, expected2)", "def enum():\n\n click.secho('*** Creating Movie to create a BlueRay for...', fg='green')\n crow = _make_document('movie', title='The Crow')\n click.secho(json.dumps(crow, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Trying to create a BlueRay with an *invalid* region code...', fg='green')\n try:\n _make_document('blueray', movie_id=crow['_id'], region_code='D')\n except requests.HTTPError as e:\n click.secho(str(e), fg='red')\n click.secho(json.dumps(e.response.json(), indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Trying to create a BlueRay with a *valid* region code...', fg='green')\n blueray = _make_document('blueray', movie_id=crow['_id'], region_code='FREE')\n click.secho(json.dumps(blueray, indent=2, sort_keys=True), fg='yellow')", "def _generate_random_agent(self):\n\n new_random_agent = list(self._all_waypoints)\n random.shuffle(new_random_agent)\n return tuple(new_random_agent)", "def select(self, solutions):\r\n solutions = self.sort_solutions(solutions)\r\n # define coordinates for the two groups\r\n elitists_coords = [x for x in range(self.breeding_rules.elitist_candidates)]\r\n first_discarded_solution = int(len(solutions) - (len(solutions) * self.breeding_rules.discard_rate))\r\n crossover_coords = [x for x in range(first_discarded_solution)]\r\n # fill each breeding group with its possible participants, based on the coordinates defined above\r\n elitists = [solutions[x] for x in elitists_coords]\r\n crossover = [solutions[x] for x in crossover_coords]\r\n return elitists, crossover", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def win_game():\n lists = []\n win = []\n counter = 0\n countr = 0\n countg = 0\n while(1):\n team = input(\"Enter \\\"red\\\" or \\\"green\\\" or press ENTER to stop: \")\n team = team.lower()\n if(team == \"\"):\n break\n elif(team == \"red\" or team == \"green\"):\n lists.append(team)\n counter += 1\n else:\n continue\n for i in range(0, counter, 1):\n if(lists[i] == \"red\"):\n countr += 1\n elif(lists[i] == \"green\"):\n countg += 1\n else:\n continue\n win.append(countr)\n win.append(countg)\n \n return win", "def create_match(team_id, current_matches, match_details, round_number, stats, targets):\n\n current_previous_matches = match_details.loc[\n ((match_details['home_id'] == team_id) | (match_details['away_id'] == team_id)) &\n (match_details['round'] < round_number)]\n\n # Only take the previous 3 matches and sum those stats together\n previous_matches = current_previous_matches.iloc[-3:]\n\n # Find CUR_TEAM's stats\n match_id, team_id, team_name, scheduled, is_home, total_points, goals_for, goals_against, goal_diff, goal_efficiency, \\\n played, win, loss, recent_wins, recent_losses, prev_opp, opp_id, points, goals, opp_goals, \\\n current_formation, opp_formation, game_features = \\\n calculate_stats(team_id, current_matches, previous_matches, stats, targets)\n\n # Calculate the OPPONENTS stats\n if stats:\n print('Current Opponent ID : {0}'.format(opp_id))\n\n # Find OPP_TEAM's stats\n opp_previous_matches = match_details.loc[\n ((match_details['home_id'] == opp_id) | (match_details['away_id'] == opp_id)) &\n (match_details['round'] < round_number)]\n\n 
opp_previous_matches = opp_previous_matches.iloc[-3:]\n\n _, opp_team_id, opp_team_name, _, opp_is_home, opp_total_points, opp_goals_for, opp_goals_against, opp_goal_diff, opp_goal_efficiency, \\\n opp_played, opp_win, opp_loss, opp_recent_wins, opp_recent_losses, opp_opp, _, _, _, _, _, _, opp_game_features = calculate_stats(opp_id, current_matches, opp_previous_matches, False, False)\n\n if stats:\n print('Previous Opponents of Current Team : {0}'.format(prev_opp))\n\n prev_opp_won_total = 0\n prev_opp_lost_total = 0\n\n for prev_opp_id in prev_opp:\n prev_opp_previous_matches = match_details.loc[\n ((match_details['home_id'] == prev_opp_id) | (match_details['away_id'] == prev_opp_id)) &\n (match_details['round'] < round_number)]\n\n # Only take the previous 3 matches and sum those stats together\n prev_opp_previous_matches = prev_opp_previous_matches.iloc[-3:]\n\n _, _, _, _, _, _, prev_opp_goals_for, prev_opp_goals_against, prev_opp_goal_diff, prev_opp_goal_efficiency, \\\n prev_opp_played, prev_opp_win, prev_opp_loss, _, _, opp_prev_opp, _, prev_opp_points, prev_opp_goals, opp_prev_opp_goals, _, _, \\\n prev_opp_game_features = calculate_stats(prev_opp_id, current_matches, prev_opp_previous_matches, False, False)\n\n prev_opp_won_total += prev_opp_win\n prev_opp_lost_total += prev_opp_loss\n\n opp_opp_won_total = 0\n opp_opp_lost_total = 0\n\n if stats:\n print('Current Opponents of Opponent : {0}'.format(opp_opp))\n\n # Calculate OPPONENTS of the OPPONENTS stats\n for opp_opp_id in opp_opp:\n\n opp_opp_previous_matches = match_details.loc[\n ((match_details['home_id'] == opp_opp_id) | (match_details['away_id'] == opp_opp_id)) &\n (match_details['round'] < round_number)]\n\n # Only take the previous 3 matches and sum those stats together\n opp_opp_previous_matches = opp_opp_previous_matches.iloc[-3:]\n\n opp_opp_match_id, opp_opp_team_id, opp_opp_team_name, scheduled, opp_opp_is_home, opp_opp_total_points, opp_opp_goals_for, opp_opp_goals_against, opp_opp_goal_diff, opp_opp_goal_efficiency, \\\n opp_opp_played, opp_opp_win, opp_opp_loss, opp_opp_recent_wins, opp_opp_recent_losses, opp_opp_opp, _, _, _, _, _, _, opp_opp_game_features = calculate_stats(opp_opp_id, current_matches, opp_opp_previous_matches, False, False)\n opp_opp_won_total += opp_opp_win\n opp_opp_lost_total += opp_opp_loss\n\n if stats:\n print('Opponents of Previous Opponents : {0}'.format(opp_prev_opp))\n\n opp_prev_opp_won_total = 0\n opp_prev_opp_lost_total = 0\n\n # Calculate OPPONENTS of the PREVIOUS OPPONENTS stats\n for opp_prev_opp_id in opp_prev_opp:\n\n opp_prev_opp_previous_matches = match_details.loc[\n ((match_details['home_id'] == opp_prev_opp_id) | (match_details['away_id'] == opp_prev_opp_id)) &\n (match_details['round'] < round_number)]\n\n # Only take the previous 3 matches and sum those stats together\n opp_prev_opp_previous_matches = opp_prev_opp_previous_matches.iloc[-3:]\n\n opp_prev_opp_match_id, opp_prev_opp_team_id, opp_prev_opp_team_name, scheduled, opp_prev_opp_is_home, opp_prev_opp_total_points, opp_prev_opp_goals_for, opp_prev_opp_goals_against, opp_prev_opp_goal_diff, opp_prev_opp_goal_efficiency, \\\n opp_prev_opp_played, opp_prev_opp_win, opp_prev_opp_loss, opp_prev_opp_recent_wins, opp_prev_opp_recent_losses, _, _, _, _, _, _, _, opp_prev_opp_game_features = calculate_stats(\n opp_prev_opp_id, current_matches, opp_prev_opp_previous_matches, False, False)\n opp_prev_opp_won_total += opp_prev_opp_win\n opp_prev_opp_lost_total += opp_prev_opp_loss\n\n if stats:\n print('Opponents of 
Opponents Opponents : {0}'.format(opp_opp_opp))\n\n opp_opp_opp_won_total = 0\n opp_opp_opp_lost_total = 0\n\n # Calculate OPPONENTS of the OPPONENTS' OPPONENTS' stats\n for opp_opp_opp_id in opp_opp_opp:\n opp_opp_opp_previous_matches = match_details.loc[\n ((match_details['home_id'] == opp_opp_opp_id) | (match_details['away_id'] == opp_opp_opp_id)) &\n (match_details['round'] < round_number)]\n\n opp_opp_opp_match_id, opp_opp_opp_team_id, opp_opp_opp_team_name, scheduled, opp_opp_opp_is_home, opp_opp_opp_total_points, opp_opp_opp_goals_for, opp_opp_opp_goals_against, opp_opp_opp_goal_diff, opp_opp_opp_goal_efficiency, \\\n opp_opp_opp_played, opp_opp_opp_win, opp_opp_opp_loss, opp_opp_opp_recent_wins, opp_opp_opp_recent_losses, _, _, _, _, _, _, _, opp_opp_opp_game_features = calculate_stats(\n opp_opp_opp_id, current_matches, opp_opp_opp_previous_matches, False, False)\n opp_opp_opp_won_total += opp_opp_opp_win\n opp_opp_opp_lost_total += opp_opp_opp_loss\n\n \"\"\" //////////////////////////////////////////////////////////////////////////////////////////////////// \"\"\"\n \"\"\" Collected all the information from relevant matches. Now send through all what we have. \"\"\"\n \"\"\" //////////////////////////////////////////////////////////////////////////////////////////////////// \"\"\"\n # Only calculate SOS + RPI here since they include previous matches\n current_record = np.divide(win, (win + loss))\n opp_record = np.divide(opp_win, (opp_win + opp_loss))\n prev_opp_record = np.divide(prev_opp_win, (prev_opp_win + prev_opp_loss))\n opp_prev_opp_record = np.divide(opp_prev_opp_won_total, (opp_prev_opp_won_total + opp_prev_opp_lost_total))\n sos = np.divide((2 * prev_opp_record) + opp_prev_opp_record, 3)\n rpi = (current_record * .25) + (sos * .75)\n\n feature = {'match_id': match_id, 'team_id': team_id, 'team_name': team_name, 'opp_id': opp_team_id,\n 'opp_name': opp_team_name, 'scheduled': scheduled, 'round': round_number, 'games_played': played,\n 'is_home': is_home, 'current_formation': current_formation, 'current_record': current_record,\n 'opp_record': opp_record, 'goals_for': goals_for, 'opp_goals_for': opp_goals_for,\n 'goals_against': goals_against, 'opp_goals_against': opp_goals_against, 'rpi': rpi,\n 'goals': goals, 'points': points}\n\n game_features = {'current_team': game_features, 'opp_team': opp_game_features }\n\n if stats:\n print(\"//////////////////////////////////////////////////\")\n\n return feature, game_features", "def register_team(self, agents_on_team):\n self.agents_on_team = agents_on_team", "def yield_team(self) -> str: # pragma: no cover", "def cross_detect2(self):\n for agent_idx in range(self.agent_num):\n\n agent = self.agent_list[agent_idx]\n for object_idx in range(len(self.map['objects'])):\n object = self.map['objects'][object_idx]\n\n if not object.can_pass():\n continue\n else:\n #print('object = ', object.type)\n if object.color == 'red' and object.check_cross(self.agent_pos[agent_idx], agent.r):\n\n agent.color = 'red'\n agent.finished = True #when agent has crossed the finished line\n agent.alive = False #kill the agent when finishing the task", "def test_actor_options_complicated(shared_ray_instance):\n\n @ray.remote\n def combine(x, y):\n return x + y\n\n a1 = Actor.options(name=\"a1_v0\")._bind(10)\n res = a1.get.options(name=\"v1\")._bind()\n print(res)\n assert ray.get(res.execute()) == 10\n assert a1.get_options().get(\"name\") == \"a1_v0\"\n assert res.get_options().get(\"name\") == \"v1\"\n\n a1 = 
Actor.options(name=\"a1_v1\")._bind(10) # Cannot\n a2 = Actor.options(name=\"a2_v0\")._bind(10)\n a1.inc.options(name=\"v1\")._bind(2)\n a1.inc.options(name=\"v2\")._bind(4)\n a2.inc.options(name=\"v3\")._bind(6)\n dag = combine.options(name=\"v4\")._bind(a1.get._bind(), a2.get._bind())\n\n print(dag)\n assert ray.get(dag.execute()) == 32\n test_a1 = dag.get_args()[0] # call graph for a1.get._bind()\n test_a2 = dag.get_args()[1] # call graph for a2.get._bind()\n assert test_a2.get_options() == {} # No .options() at outer call\n # refer to a2 constructor .options() call\n assert (\n test_a2.get_other_args_to_resolve()[\"parent_class_node\"]\n .get_options()\n .get(\"name\")\n == \"a2_v0\"\n )\n # refer to actor method a2.inc.options() call\n assert (\n test_a2.get_other_args_to_resolve()[\"prev_class_method_call\"]\n .get_options()\n .get(\"name\")\n == \"v3\"\n )\n # refer to a1 constructor .options() call\n assert (\n test_a1.get_other_args_to_resolve()[\"parent_class_node\"]\n .get_options()\n .get(\"name\")\n == \"a1_v1\"\n )\n # refer to latest actor method a1.inc.options() call\n assert (\n test_a1.get_other_args_to_resolve()[\"prev_class_method_call\"]\n .get_options()\n .get(\"name\")\n == \"v2\"\n )\n # refer to first bound actor method a1.inc.options() call\n assert (\n test_a1.get_other_args_to_resolve()[\"prev_class_method_call\"]\n .get_other_args_to_resolve()[\"prev_class_method_call\"]\n .get_options()\n .get(\"name\")\n == \"v1\"\n )", "def add_agents(*_coconut_match_args, **_coconut_match_kwargs):\n _coconut_match_check_2 = False\n _coconut_match_set_name_self = _coconut_sentinel\n _coconut_match_set_name_agents = _coconut_sentinel\n _coconut_match_set_name__set_defaults = _coconut_sentinel\n _coconut_match_set_name_named_agents = _coconut_sentinel\n _coconut_FunctionMatchError = _coconut_get_function_match_error()\n if _coconut.sum((_coconut.len(_coconut_match_args) > 0, \"self\" in _coconut_match_kwargs)) == 1:\n _coconut_match_set_name_agents = _coconut_match_args[1:]\n _coconut_match_temp_5 = _coconut_match_kwargs.pop(\"_set_defaults\") if \"_set_defaults\" in _coconut_match_kwargs else True\n _coconut_match_temp_4 = _coconut_match_args[0] if _coconut.len(_coconut_match_args) > 0 else _coconut_match_kwargs.pop(\"self\")\n _coconut_match_set_name__set_defaults = _coconut_match_temp_5\n _coconut_match_set_name_self = _coconut_match_temp_4\n _coconut_match_set_name_named_agents = _coconut_match_kwargs\n _coconut_match_check_2 = True\n if _coconut_match_check_2:\n if _coconut_match_set_name_self is not _coconut_sentinel:\n self = _coconut_match_set_name_self\n if _coconut_match_set_name_agents is not _coconut_sentinel:\n agents = _coconut_match_set_name_agents\n if _coconut_match_set_name__set_defaults is not _coconut_sentinel:\n _set_defaults = _coconut_match_set_name__set_defaults\n if _coconut_match_set_name_named_agents is not _coconut_sentinel:\n named_agents = _coconut_match_set_name_named_agents\n if not _coconut_match_check_2:\n raise _coconut_FunctionMatchError('match def add_agents(self, *agents, _set_defaults=True, **named_agents):', _coconut_match_args)\n\n new_agents = []\n for a in _coconut.itertools.chain.from_iterable(_coconut_reiterable(_coconut_func() for _coconut_func in (lambda: agents, lambda: named_agents.items()))):\n _coconut_match_to_0 = a\n _coconut_match_check_1 = False\n _coconut_match_set_name_name = _coconut_sentinel\n _coconut_match_set_name_actor = _coconut_sentinel\n if (_coconut.isinstance(_coconut_match_to_0, _coconut.abc.Sequence)) and 
(_coconut.len(_coconut_match_to_0) == 2):\n _coconut_match_set_name_name = _coconut_match_to_0[0]\n _coconut_match_set_name_actor = _coconut_match_to_0[1]\n _coconut_match_check_1 = True\n if _coconut_match_check_1:\n if _coconut_match_set_name_name is not _coconut_sentinel:\n name = _coconut_match_set_name_name\n if _coconut_match_set_name_actor is not _coconut_sentinel:\n actor = _coconut_match_set_name_actor\n if _coconut_match_check_1:\n if not callable(actor):\n a = init_agent(name, actor)\n elif isinstance(actor, Agent):\n a = actor.clone(name=name)\n else:\n a = Agent(name, actor)\n assert isinstance(a, Agent), \"not isinstance({_coconut_format_0}, Agent)\".format(_coconut_format_0=(a))\n new_agents.append(a)\n self.agents += new_agents\n if _set_defaults:\n self.set_defaults(new_agents)\n return self", "def get_companies_and_people(team):", "def _plot_reward_and_trajectories_helper(\n true_reward,\n inferred_reward,\n walls,\n start,\n true_agent,\n inferred_agent,\n filename=\"reward_comparison.png\",\n animate=False,\n):\n from agents import OptimalAgent\n from gridworld.gridworld_data import create_agents_from_config\n\n # 1 Figure, 2 Plots (in a row)\n # True reward on leftmost plot (axes[0])\n # Inferred reward on rightmost plot (axes[1])\n fig, axes = plt.subplots(1, 2)\n\n # Plot the rewards\n plot_reward(true_reward, walls, \"True Reward\", fig=fig, ax=axes[0])\n plot_reward(inferred_reward, walls, \"Inferred Reward\", fig=fig, ax=axes[1])\n # Plot the agents' trajectories (will perform rollout)\n plot_trajectory(\n walls,\n true_reward,\n start,\n true_agent,\n fig=fig,\n ax=axes[0],\n animate=animate,\n fname=filename + \"0\",\n )\n plot_trajectory(\n walls,\n inferred_reward,\n start,\n inferred_agent,\n fig=fig,\n ax=axes[1],\n animate=animate,\n fname=filename + \"1\",\n )\n # Plot starting positions for agents in both the true and inferred reward plots\n plot_pos(start, color=\"m\", grid_size=len(walls), ax=axes[0])\n plot_pos(start, color=\"m\", grid_size=len(walls), ax=axes[1])\n\n # titleing\n fig.suptitle(\"Comparison of Reward Functions\")\n fig.set_tight_layout(True)\n\n # saving to file\n fig.savefig(filename)", "def __init__(self, colorNames):\n self._colorOptions = '' # initials for color choices\n for color in colorNames:\n self._colorOptions += color[0].upper()\n # following will be reset when startGame is called\n self._currentTurnNum = self._lengthOfPattern = self._maxNumberOfTurns = 0" ]
[ "0.8380259", "0.8160164", "0.8160164", "0.81365675", "0.8123886", "0.8115439", "0.8114686", "0.8110517", "0.8087326", "0.8084383", "0.8084383", "0.8084383", "0.80327135", "0.7990393", "0.797317", "0.7959049", "0.794438", "0.7907669", "0.7899954", "0.7830475", "0.78248924", "0.78064114", "0.7785593", "0.77701813", "0.7757271", "0.7506136", "0.7340193", "0.7312656", "0.59666824", "0.5639437", "0.532751", "0.52379864", "0.52254313", "0.5180778", "0.5135724", "0.5004034", "0.5001975", "0.4987506", "0.49796638", "0.4884946", "0.48443735", "0.47957796", "0.47311506", "0.472101", "0.47052786", "0.47045645", "0.46727693", "0.46628186", "0.46505857", "0.4635819", "0.46284556", "0.46193495", "0.4614139", "0.46102196", "0.45939404", "0.4563592", "0.4554589", "0.45340133", "0.45277134", "0.4525116", "0.45239407", "0.45135912", "0.45063534", "0.45009062", "0.44991755", "0.44974402", "0.4496377", "0.44911548", "0.44776228", "0.4476287", "0.44735748", "0.4473369", "0.4462522", "0.4459872", "0.44574496", "0.44381768", "0.4433406", "0.4432375", "0.44230887", "0.44226235", "0.4414828", "0.44088182", "0.44064113", "0.44038185", "0.4401625", "0.43989235", "0.43885094", "0.4387725", "0.43864858", "0.43831897", "0.43820086", "0.43819335", "0.4380079", "0.43718708", "0.43573302", "0.4353361", "0.43472734", "0.4330661", "0.43278426", "0.432503" ]
0.80977505
8
Picks among the actions with the highest Q(s,a).
def chooseAction(self, gameState): actions = gameState.getLegalActions(self.index) # You can profile your evaluation time by uncommenting these lines # start = time.time() values = [self.evaluate(gameState, a) for a in actions] # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start) maxValue = max(values) bestActions = [a for a, v in zip(actions, values) if v == maxValue] foodLeft = len(self.getFood(gameState).asList()) return random.choice(bestActions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maxQ(self,state):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n \r\n for a in self.actions:\r\n q = self.Q(state,a)\r\n #print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)", "def bestAction(self):\n get_q = self.getQFunction()\n maxq = -5000\n best_actions = []\n for (state, action), q in get_q.items():\n if q > maxq:\n maxq = q\n best_actions = [action]\n elif q == maxq:\n best_actions.append(action)\n return self.tuple_to_dictionary(random.choice(best_actions))", "def maxQ(self,state):\r\n maxA = 0\r\n maxQ = float(\"-inf\")\r\n for aCurr in self.actions:\r\n qCurr = self.Q[(state,aCurr)]\r\n if qCurr > maxQ:\r\n maxA = aCurr\r\n maxQ = qCurr \r\n return(maxQ,maxA)", "def maxQ(self,feat):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n for a in self.actions:\r\n q = self.Q(feat,a)\r\n print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)", "def best_Q_action(self, state):\n state_Q = {}\n\n for action in self.actions:\n if (state, action) not in self.Q:\n return False\n else:\n state_Q[(state, action)] = self.Q[(state, action)]\n\n return max(state_Q.iteritems(), key=operator.itemgetter(1))[0][1]", "def max_Q_by_state(self, state):\n max_q = []\n\n for action in self.actions:\n if (state, action) not in self.Q:\n return self.Q_default_value\n else:\n max_q.append(self.Q[(state, action)])\n\n return max(max_q)", "def computeActionFromQValues(self, state):\n actions = self.getLegalActions(state)\n if len(actions) == 0:\n return None\n qVals = [self.getQValue(state, a) for a in actions]\n bestActions = []\n bestVal = max(qVals)\n for i in range(len(actions)):\n if qVals[i] == bestVal:\n bestActions.append(actions[i])\n return random.choice(bestActions) #Break ties randomly", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n legal_actions = self.getLegalActions(state)\n if len(legal_actions) == 0: return None\n values = [self.getQValue(state, action) for action in legal_actions]\n max_value = max(values)\n best_indices = [index for index in range(len(values)) if values[index] == max_value]\n return legal_actions[random.choice(best_indices)]", "def select_action(self, state):\n \n ##create lists and string to save relative action information\n actions = []\n action = ''\n all_actions = []\n \n ##get the action with the maximum value\n temp = {}\n for (s, a), value in self.Q.iteritems():\n if s == state:\n temp[(s, a)] = value\n all_actions.append(a) \n max_value = max(temp.values())\n for (s, a) , value in temp.iteritems():\n if value == max_value:\n actions.append(a)\n\n ##if we have more than one action with max_values, random return one\n if len(actions) > 1:\n index = random.randint(0,len(actions) - 1)\n action = str(actions[index])\n else:\n for item in actions:\n action = item\n \n ##when the random number less than epsilon, then return one action randomly \n if random.random() < self.epsilon:\n index = random.randint(0, len(all_actions) - 1)\n action = str(all_actions[index])\n \n ##if the random number not less than epsilon, then return the action with max value\n return action", "def select_action(self, state):\n \n ##create lists and string to save relative action information\n actions = []\n action = ''\n all_actions = []\n \n ##get the action with the maximum value\n temp = {}\n for (s, a), value in self.Q.iteritems():\n if s == state:\n temp[(s, a)] = value\n all_actions.append(a)\n \n max_value = max(temp.values())\n for (s, a) , value in temp.iteritems():\n if value == max_value:\n 
actions.append(a)\n\n ##if we have more than one action with max_values, random return one\n if len(actions) > 1:\n index = random.randint(0,len(actions) - 1)\n action = str(actions[index])\n else:\n for item in actions:\n action = item\n \n ##when the random number less than epsilon, then return one action randomly \n if random.random() < self.epsilon:\n index = random.randint(0, len(all_actions) - 1)\n action = str(all_actions[index])\n \n ##if the random number not less than epsilon, then return the action with max value\n return action", "def select_action(self) -> int:\n # simulation loop\n for i in range(self.iterations):\n self.__simulate(self.root, self.iterations)\n\n # action choice\n max_q = 0\n best_action = 0\n for action in actions:\n new_node = self.root.children[action]\n value = new_node.Q\n if value > max_q:\n max_q = value\n best_action = action\n return best_action", "def getBestAction(self, state):\n best_action = 0\n max_Q = -9999\n\n for action in self.getLegalActions(state):\n Q = self.getQValue(state, action)\n if Q > max_Q:\n best_action = action\n max_Q = Q\n \n return best_action", "def getPolicy(self, state):\n \"\"\"Description:\n Find all of q-values of current state, and choose the action \n with the hight q-value as optimal policy\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n legalActions = self.getLegalActions(state)\n action = None\n policy = util.Counter() # use counter to store action and its q-value\n \n if len(legalActions) == 0:\n return action\n \n for a in legalActions:\n policy[a] = self.getQValue(state, a)\n action = policy.argMax()\n return action\n\n \"\"\" END CODE \"\"\"", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n maxvalue = -100000000\n bestaction = None\n for action in self.mdp.getPossibleActions(state):\n valueforthisaction = self.getQValue(state, action) # is this right? 
\n if valueforthisaction > maxvalue:\n bestaction = action\n maxvalue = valueforthisaction\n return bestaction", "def better_action(tip_speed):\n possible_actions_in_state = Q[get_state(tip_speed)]\n action_of_choice = np.argmax(possible_actions_in_state)\n return action_of_choice", "def select_action(self, q_values):\n assert q_values.ndim == 1\n nb_actions = q_values.shape[0]\n if np.random.uniform() < self.eps:\n copy_q_values = np.copy(q_values)\n idx = np.argmax(q_values)\n copy_q_values[idx] = 0\n for i in range(0, nb_actions):\n val = copy_q_values[i]\n copy_q_values[i] = -1e8 if val == 0 else val * np.random.uniform()\n action = np.argmax(copy_q_values)\n else:\n action = np.argmax(q_values)\n return action", "def greedy(q, s):\n # Your code here\n return argmax(q.actions,lambda a:q.get(s,a))", "def greedy_policy(self, q, s):\n\t\tresult = []\n\t\tif q is None:\n\t\t\treturn result\n\t\tmax_val = q[0]\n\t\tfor action in self.feasible_actions_in_state(s):\n\t\t\tq_value = q[action]\n\t\t\tif q_value == max_val:\n\t\t\t\tresult.append(action)\n\t\t\telif q_value > max_val:\n\t\t\t\tresult = [action]\n\t\t\t\tmax_val = q_value\n\t\treturn result", "def select_action(self):\n estimated_q_a = self._action_value_estimator.get_estimated_q_a()\n\n if np.random.rand() < self._epsilon:\n chosen_action = random.choice(list(estimated_q_a.keys()))\n else:\n chosen_action = max(estimated_q_a, key=estimated_q_a.get)\n\n return chosen_action", "def select_action(self, state):\n return np.argmax(self.Q[state])", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n max_qvalue = None\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if max_qvalue is None or max_qvalue < qvalue:\n max_qvalue = qvalue\n\n if max_qvalue is None:\n return None\n\n actions = []\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if qvalue == max_qvalue:\n actions.append(action)\n\n if max_qvalue is not None and len(actions) == 0:\n return self.legalActions[0]\n if len(actions) > 1:\n return Const.DO_NOTHING\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n max_qvalue = None\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if max_qvalue is None or max_qvalue < qvalue:\n max_qvalue = qvalue\n\n if max_qvalue is None:\n return None\n\n actions = []\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if qvalue == max_qvalue:\n actions.append(action)\n\n if max_qvalue is not None and len(actions) == 0:\n return self.legalActions[0]\n if len(actions) > 1:\n return Const.DO_NOTHING\n return random.choice(actions)", "def _best_action(self, state):\n actions_rewards = list(self.Q[state].items())\n return max(actions_rewards, key=lambda x: x[1])[0]", "def get_max_q(self, actions, q2_state):\n\n action_values = [ qtron.forward_pass(q2_state) for qtron in actions.values() ]\n\n maxQ = max(action_values)\n\n return maxQ", "def best_action(self, actions, state):\n\n maxQvalue = self.valueFromQvalues(state, actions)\n\n if GameEnds13(state):\n return None\n else:\n maxAction = [action for action in actions if self.getQvalue(state, action) == maxQvalue]\n best_action = random.choice(maxAction)\n return best_action", "def value(q, s):\n # Your code here\n return max(q.get(s,a) for a in q.actions)", "def maxOldQ(self,feat):\r\n maxQ = float('-inf')\r\n for a in self.actions:\r\n q = self.oldQ(feat,a)\r\n \r\n if q > maxQ:\r\n maxQ = q\r\n \r\n 
return(maxQ)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n if not self.getLegalActions(state): return None\n\n best_action = None;\n best_value = float('-inf')\n for action in self.getLegalActions(state):\n if self.getQValue(state, action) > best_value:\n best_value = self.getQValue(state, action)\n best_action = action\n return best_action", "def bestAction(self, state):\n action = self.q_network.chooseBestAction(state)\n V = max(self.q_network.qValues(state))\n return action, V", "def choose_action(self, state):\n if random.random() < self.epsilon:\n self.epsilon -= self.epsilon_annealing_rate\n return random.choice(self.valid_actions)\n \n #initialize search variables\n opt_action = self.valid_actions[0]\n opt_value = 0\n\n #performs a search across all valid actions for highest q-value.\n for action in self.valid_actions:\n cur_value = self.q_value(state, action)\n if cur_value > opt_value:\n opt_action = action\n opt_value = cur_value\n elif cur_value == opt_value:\n opt_action = random.choice([opt_action, action])\n return opt_action", "def max_q_value(self, state):\n max_value = None\n for action in self.valid_actions:\n cur_value = self.q_value(state, action)\n if max_value is None or cur_value > max_value:\n max_value = cur_value\n return max_value", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.getLegalActions(state)\n if len(actions) == 0:\n return None\n values = [self.getQValue(state, action) for action in actions]\n LoT = zip(values, actions)\n (bestValue, bestAction) = max(LoT)\n return bestAction", "def execute_best_actions(self):\n while True:\n print(\"In execute_best_actions\")\n s = self.get_state_num()\n qvals = self.Q[s]\n # Get action with largest qval\n best_action = np.argmax(qvals)\n # We don't actually update with rewards,\n # but use them to know when to perform next action\n # We want to travel 0.5 m in action's direction.\n self.apply_action(best_action)\n while self.reward == None:\n rospy.sleep(0.5)\n print(\"Reward =\", self.reward)\n self.reward = None", "def best_action(q_table: np.ndarray, state: int) -> int:\n return int(np.argmax(q_table[state]))", "def __call__(self, state, q_values):\n\n if self.policy_type == \"greedy\":\n is_greedy = True\n else:\n is_greedy = random.uniform(0, 1) > self.epsilon\n\n if is_greedy :\n # choose greedy action\n index_action = np.argmax(q_values[state])\n else:\n # get a random action\n index_action = random.randint(0,3)\n\n return actions_dict[index_action]", "def epsilon_greedy_policy_improve(Q_value, nS, nA, epsilon):\n\n new_policy = epsilon * np.ones((nS, nA)) / nA # = epsilon / m, where m is the number of Actions, nA\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: IF TWO ACTIONS HAVE THE SAME MAXIMUM Q VALUE, THEY MUST BOTH BE EXECUTED EQUALLY LIKELY.\n # THIS IS IMPORTANT FOR EXPLORATION. This might prove useful:\n # https://stackoverflow.com/questions/17568612/how-to-make-numpy-argmax-return-all-occurrences-of-the-maximum\n \n # print(\"new_policy = {0}\".format(new_policy))\n \n for s_t in range (0, nS):\n # print(\"old_policy[{0}] = {1}\".format(s_t, new_policy[s_t]))\n # print(\"Q_value[{0}] = {1}\".format(s_t, Q_value[s_t]))\n Q_list = np.argwhere(Q_value[s_t] == np.amax(Q_value[s_t])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n # print(\"Q_list: \" + str(Q_list))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. 
Picking each index is equally likely.\n # print(\"max_Q: \" + str(max_Q))\n \n # A_star = new_policy[s_t][max_Q]\n # print(\"A_star: \" + str(A_star))\n \n new_policy[s_t][max_Q] += 1 - epsilon # for the chosen maximal index of Q, set the polocy to epsilon/m + 1 - epsilon\n # print(\"new_policy[{0}] = {1}\".format(s_t, new_policy[s_t]))\n \n # for a_t in range (0, nA):\n # if a_t in Q_list:\n # new_policy[s_t][a_t] += (1 - epsilon) / len(Q_list)\n\n ############################\n # print(\"new_policy = {0}\".format(new_policy))\n return new_policy", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(state), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n\n\n #check if teminal\n if self.mdp.isTerminal(state):\n return None\n else:\n # get all actions for state\n actionArr = self.mdp.getPossibleActions(state)\n\n #Q val and action at index 0 of action Array\n currentQ = self.computeQValueFromValues(state, actionArr[0])\n currentAction = actionArr[0]\n\n #loop through action Array\n for action in actionArr:\n #compute q at each index\n possibleQ = self.computeQValueFromValues(state, action)\n\n if (possibleQ == currentQ):\n currentAction = random.choice([currentAction, action])\n\n\n elif (possibleQ > currentQ):\n # want to return action for greatest Q value\n currentAction = action\n # update Q to keep track of corresponding value\n currentQ = possibleQ\n\n\n\n\n return currentAction", "def choose_action(state, qmatrix, numb_actions):\n\n qmatrix = check_state_exist(state, qmatrix, numb_actions)\n if np.random.uniform() < epsilon:\n # choose best action from qmatrix\n state_action = qmatrix.loc[state, :]\n state_action = state_action.reindex(np.random.permutation(state_action.index)) # some actions have same value\n action = state_action.idxmax()\n else:\n # choose random action from qmatrix\n action = np.random.choice(list(range(numb_actions))) \n return action", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = 
self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n ############################################################################################################ Eric Changed state to self.index\n \n legalActions = state.getLegalActions(self.index)\n #print \"COMPUTEACTIONFROMQVALUES in QLEARNINGAGENT, LEGALACTIONS: \", legalActions\n if len(legalActions) == 0:\n return None\n maxValue = self.getQValue(state, legalActions[0])\n maxAction = legalActions[0]\n\n for a in legalActions:\n myQValue = self.getQValue(state, a)\n #print \"COMPUTEACTIONFROMQVALUES in QLEARNINGAGENT, MYQVALUE: \", myQValue, \" MAXVALUE: \", maxValue\n if myQValue > maxValue:\n maxValue = self.getQValue(state, a)\n maxAction = a\n if myQValue == maxValue:\n if util.flipCoin(0.5):\n maxValue = self.getQValue(state, a)\n maxAction = a\n #print \"COMPUTEACTIONFROMQVALUES in QLEARNINGAGENT, MAXACTION: \", maxAction\n return maxAction\n util.raiseNotDefined()", "def sample_actions(self, qvalues):\n batch_size, n_actions = qvalues.shape\n best_actions = qvalues.argmax(axis=-1)\n\n return qvalues.argmax(axis=-1)", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.mdp.getPossibleActions(state)\n optimalAction = None\n maxValue = float('-inf')\n for a in actions:\n qValue = self.computeQValueFromValues(state, a)\n if qValue > maxValue:\n maxValue = qValue\n optimalAction = a\n return optimalAction", "def select_max_q(\n action_scores: torch.Tensor, action_mask: torch.Tensor\n ) -> torch.Tensor:\n # we want to select only from the unmasked actions\n # if we naively take the argmax, masked actions would\n # be picked over actions with negative scores if there are no\n # actions with positive scores. 
So we first subtract the minimum score\n # from all actions and add a small epsilon so that actions with negative\n # scores would have small positive scores and they would get chosen\n # over masked actions\n shifted_action_scores = (\n action_scores - action_scores.min(dim=1, keepdim=True)[0] + 1e-5\n ) * action_mask\n return shifted_action_scores.argmax(dim=1)", "def __selection(self, node: TreeNode) -> int:\n max_q = 0\n best_action = 0\n\n # update the value of N(s)\n node.N = 0\n for action in actions:\n node.N += node.children[action].N\n\n # choose action using UCT\n shuffled_actions = actions[:]\n random.shuffle(shuffled_actions)\n for action in shuffled_actions:\n new_node = node.children[action]\n value = new_node.Q + self.c * math.sqrt(math.log(node.N) / new_node.N)\n if value > max_q:\n max_q = value\n best_action = action\n return best_action", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.mdp.getPossibleActions(state)\n # Initialize max_value as - infinity\n # Initialize best action as None, choose max_value action\n max_value = float(\"-inf\")\n computed_action = None\n\n for action in actions:\n # Find q value of specified action\n q_value = self.computeQValueFromValues(state, action)\n # Update action if it's the best so far\n if q_value > max_value:\n max_value = q_value\n computed_action = action\n return computed_action", "def get_optimal_action(self, state):\n # check if there are multiple equivalent optimal actions\n if sum(self.Q_values[state] == np.amax(self.Q_values[state])) > 1:\n # select one of the optimal actions randomly\n idxs = np.where(self.Q_values[state] == np.amax(self.Q_values[state]))[0]\n return idxs[np.random.randint(0, idxs.size)]\n else:\n # return the unique optimal action\n return np.argmax(self.Q_values[state])", "def select_action(self, q_values, **kwargs):\n _rand = np.random.rand(1, 1)\n rand_action = np.random.randint(self.num_actions, size=(5, 5))\n q_values = np.reshape(q_values, [10, 5, 5])\n max_action = np.argmax(q_values, axis=0)\n # max_action = np.reshape(max_action, (-1,1))\n action_map = np.reshape(max_action, (5, 5))\n # print(action_map)\n _mask = _rand < self.epsilon\n res = _mask * rand_action + (1 - _mask) * max_action\n return res", "def pick_action(self, available_actions, epsilon=.05):\n if np.random.uniform(0, 1) < epsilon:\n action = available_actions[np.random.randint(\n 0, len(available_actions))]\n else:\n q_values_of_state = self.q_table[self.environment.current_location]\n maxValue = max(q_values_of_state.values())\n action = np.random.choice(\n [k for k, v in q_values_of_state.items() if v == maxValue]\n )\n\n return action", "def _select_action(self):\n if self.eval_mode:\n self._log_values()\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state according\n # to the current head.\n return self._compute_q_argmax()", "def choose_action(self, state):\n prob = [] # Probability distribution\n for i in range(len(ACTIONS)):\n prob.append(self.epsilon/4)\n Q_func = self.policy.predict(process_state(state))\n Q_vals = Q_func[0]\n max_index = []\n Qmax = np.amax(Q_vals)\n for i in range(len(prob)):\n if Q_vals[i] == Qmax:\n # max_index.append(i)\n prob[i] = 1 - 
self.epsilon + self.epsilon/4\n break\n # ind = np.random.choice(max_index)\n # prob[ind] = 1 - self.epsilon + self.epsilon/4\n action = np.random.choice(ACTIONS, p = prob)\n return action", "def querysetstate(self, s): \n self.s = s \n\n if np.random.random() < self.rar:\n action = np.random.randint(self.num_actions)\n else:\n action = np.argmax(self.Q[s])\n\n if self.verbose: \n print(f\"s = {s_prime}, a = {action}, r={r}\") \n\n return action", "def select_action(self, q_values):\n \n action = Sc2Action()\n\n # Epsilon-Greedy\n # pdb.set_trace()\n egran=np.random.uniform()\n if egran < self.eps and not self.testing:\n action.action = np.random.random_integers(0, self.nb_actions-1)\n action.coords = (np.random.random_integers(0, self.nb_pixels-1), np.random.random_integers(0, self.nb_pixels-1))\n if self.eps <0.05:\n print('eps:',self.eps)\n\n else:\n # greedy.\n action.action = np.argmax(q_values[0]) \n #pdb.set_trace()\n action.coords = np.unravel_index(q_values[1].argmax(), q_values[1].shape)[1:3]\n \n # action.coords = np.unravel_index(np.reshape(q_values[1][0][:][:], (16, 16)).argmax(), np.reshape(\n\n assert len(action.coords) == 2\n\n return action", "def act(self,observation):\n maximum_actions = np.argwhere(self.q_table[observation] == np.amax(self.q_table[observation])).flatten()\n return(np.random.choice(maximum_actions))", "def getValue(self, state):\n \"\"\"Description:\n first get legal actions of current state and find the max q-value among all legalaction. \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n legalActions = self.getLegalActions(state)\n if len(legalActions) == 0:\n return 0.0\n maxValues = max([ self.getQValue(state, a) for a in legalActions])\n return maxValues\n \n \"\"\" END CODE \"\"\"", "def select_final(self):\n best_qsa_star = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n if qsa_star > best_qsa_star:\n best_qsa_star = qsa_star\n best_node = c\n return best_node.action", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def computeActionFromValues(self, state):\n best_move = None\n best_Q = 
float(\"-inf\")\n\n for a in self.mdp.getPossibleActions(state):\n q = self.computeQValueFromValues(state,a)\n if q > best_Q:\n best_Q = q\n best_move = a\n\n return best_move", "def best_action(self):\n child_score = self.child_Q() + self.mcts.c_puct * self.child_U()\n masked_child_score = child_score\n return np.argmax(masked_child_score)", "def max_diffs(state):\n # your code here\n return best_action(state, pig_actions, Q_pig, win_diff)", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n max_next_qvalue = None\n for nextAction in self.legalActions:\n next_qvalue = self.getQValue(state, nextAction)\n if max_next_qvalue is None or max_next_qvalue < next_qvalue:\n max_next_qvalue = next_qvalue\n if max_next_qvalue is None:\n max_next_qvalue = 0.0\n\n return max_next_qvalue", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n max_next_qvalue = None\n for nextAction in self.legalActions:\n next_qvalue = self.getQValue(state, nextAction)\n if max_next_qvalue is None or max_next_qvalue < next_qvalue:\n max_next_qvalue = next_qvalue\n if max_next_qvalue is None:\n max_next_qvalue = 0.0\n\n return max_next_qvalue", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n if len(self.getLegalActions(state)) == 0:\n return 0.0\n max_value = -float('inf')\n for action in self.getLegalActions(state):\n max_value = max(max_value, self.getQValue(state, action))\n return max_value", "def computeValueFromQValues(self, state):\n \treturn max([self.getQValue(state, action) for action in self.actions])", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n self.Temporary_QValue = util.Counter() #initializing a temporary QValue counter\n\n temporary_QValue = self.Temporary_QValue\n\n legal_Actions = self.getLegalActions(state) #get all the legal actions like north,south,east,west,exit\n\n length_legalActions = len(legal_Actions) #find length of legal actions just to find later if we have legal actions or not\n\n if length_legalActions == 0: #to check if we have any legal action or not\n return 0.0 #Returns value 0 as we do not have any legal actions, we cannot pass 'None' as autograder in q8 expects a float value and not string value\n\n for a in legal_Actions: #loop to check for each legal action\n\n temporary_QValue[a] = self.getQValue(state,a) #Find the Qvalue of each action\n\n best_action = temporary_QValue.argMax() #find the best action to take in a state\n return best_action\n #util.raiseNotDefined()", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def get_best_action(self, state):\n possible_actions = self.get_legal_actions(state)\n\n # If there are no legal actions, return None\n if len(possible_actions) == 0:\n return None\n\n #\n # INSERT CODE HERE to get best possible action in a given state (remember to break ties randomly)\n #\n\n best_action_value = self.get_qvalue(state, possible_actions[0])\n best_actions = [possible_actions[0]]\n for action in possible_actions[1:]:\n value = 
self.get_qvalue(state, action)\n if value > best_action_value:\n best_actions = [action]\n best_action_value = value\n elif value == best_action_value:\n best_actions.append(action)\n\n best_action = random.choice(best_actions)\n\n return best_action", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def bestFutureReward(self, state):\n available_actions = Nim.availableActions(state)\n if not available_actions:\n return 0\n\n best_q = -math.inf\n for action in available_actions:\n best_q = max(best_q, self.getQValue(state, action))\n return best_q", "def choose_action(self, board):\n options = board.empty_cells\n # to allow exploration, have a small probability of a random move\n p_random = random.random()\n # if the state is not in the table add it\n if (self.sign, board.state) not in self.Q_table.keys() or p_random < self.epsilon:\n values = {}\n for option in options:\n values[option] = random.random()\n self.Q_table[(self.sign, board.state)] = values\n self.action = random.choice(options)\n else:\n values = self.Q_table[(self.sign, board.state)]\n action = max(values, key=values.get)\n self.action = action\n\n # decrease exploration after each action\n if self.epsilon > 0:\n self.epsilon -= 0.0001\n\n return self.action", "def max_value (self, new_state):\n \n ##create a list to save reward information\n return_list = []\n \n ##get each values from Q based on the new_state and its possible actions\n for s, a in self.Q.keys():\n if s == new_state:\n return_list.append(self.Q[s,a])\n \n ##return the maximum value based on new_state\n return max(return_list)", "def eps_greedy(Q, epsilon, num_actions):\n if np.random.uniform(0,1,1) > epsilon:\n action = np.argmax(Q)\n else:\n action = np.random.randint(low=0, high=num_actions)\n \n Q_value = Q[action]\n return action, Q_value", "def get_action_choice(self, state: str, epsilon: float):\n\n # e-greedy\n if random.random() < epsilon:\n return {0: random.choice(Actions.actions), 1: random.choice(Actions.actions)}\n else:\n # Get the Q-values for the actions in this state\n Qs_t = self.Q_t[state]\n\n max_Qs_t = max(Qs_t.values())\n\n # find index of the max Q-values\n max_index = [a for a, q in Qs_t.items()\n if q == max_Qs_t]\n\n # choose one of the max-index with uniform distribution\n selected = random.choice(max_index)\n return {0: selected[0], 1: selected[1]}", "def act(self):\n\n\t\t# Figure out the action selected by each head\n\t\tQs = self.dqn.get_Qs(self.state_history)\n\t\tactions = np.argmax(Qs, axis=1)\n\n\t\t# Select the action of the control head\n\t\taction = actions[self.head_number]\n\t\tQ = Qs[self.head_number]\n\n\t\treturn action, Q", "def q(self, s, a):\n # The Q value of the current state is based on the max Q value of the next state.\n next_state_max_q = max([self.qtable[s[0]+x][s[1]+y] for (x,y) in self.maze.moves()])\n self.qtable[s[0]+a[0]][s[1]+a[1]] = (self.qtable[s[0]+a[0]][s[1]+a[1]]\n + self.alpha * (self.r(s,a) + self.gamma * next_state_max_q\n - self.qtable[s[0]+a[0]][s[1]+a[1]]))\n\n return self.qtable[s[0]+a[0]][s[1]+a[1]]", "def choose_action(self, state, 
epsilon_greedy=False):\n chosen_action = None\n if epsilon_greedy:\n if np.random.rand() <= self.epsilon:\n print('random actions')\n\n # choose random action\n chosen_action = random.choice(self.actions)\n\n else:\n print('argmax')\n\n # find the action with greatest Q value\n maxQ = -float(\"inf\")\n for action in self.actions:\n input_data = np.asarray(state + action).reshape(self.OUTPUT_DIM, self.INPUT_DIM)\n Q = self.model.predict(input_data)\n if Q > maxQ:\n maxQ = Q\n chosen_action = action\n\n else:\n\n # policy rollout\n maxQ = -float(\"inf\")\n for action in self.actions:\n input_data = np.asarray(state + action).reshape(self.OUTPUT_DIM, self.INPUT_DIM)\n Q = self.model.predict(input_data)\n if Q > maxQ:\n maxQ = Q\n chosen_action = action\n\n return chosen_action", "def max_diffs(state):\n return best_action(state, pig_actions, Q_pig, win_diff)", "def computeActionFromValues(self, state):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n \n # Code to remove --- from here\n resultingAction = None\n if self.mdp.isTerminal(state):\n return resultingAction\n else:\n bestq = float(\"-inf\")\n actions = self.mdp.getPossibleActions(state)\n for action in actions:\n qvalue = self.computeQValueFromValues(state, action)\n if qvalue > bestq:\n bestq = qvalue\n resultingAction = action\n return resultingAction\n\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"", "def get_action(self, state):\n depth_limit = 20\n\n if state.ply_count < 4 and self.data is not None:\n if state in self.data:\n self.queue.put(self.data[state])\n else:\n self.queue.put(random.choice(state.actions()))\n else:\n for depth in range(1, depth_limit+1):\n best_move = self.alpha_beta_search(state, depth)\n if best_move is not None:\n self.queue.put(best_move)", "def computeActionFromQValues(self, state):\n acs = util.Counter()\n for action in self.actions:\n\t\t\tacs[action]=self.getQValue(state, action)\n return acs.argMax()", "def select_action_q_and_u(self, env, is_root_node) -> chess.Move:\n\n # this method is called with state locked\n state = board_state_key(env)\n\n my_visitstats = self.tree[state]\n\n if my_visitstats.p is not None: #push p to edges\n tot_p = 1e-8\n for mov in env.legal_moves():\n mov_p = my_visitstats.p[self.move_lookup[mov]]\n my_visitstats.a[mov].p = mov_p\n tot_p += mov_p\n for a_s in my_visitstats.a.values():\n a_s.p /= tot_p\n my_visitstats.p = None\n\n xx_ = np.sqrt(my_visitstats.sum_n + 1) # sqrt of sum(N(s, b); for all b)\n\n e = self.play_conf.noise_eps\n c_puct = self.play_conf.c_puct\n dir_alpha = self.play_conf.dirichlet_alpha\n\n best_s = -999\n best_a = None\n if is_root_node:\n noise = np.random.dirichlet([dir_alpha] * len(my_visitstats.a))\n\n i = 0\n for action, a_s in my_visitstats.a.items():\n p_ = a_s.p\n if is_root_node:\n p_ = (1-e) * p_ + e * noise[i]\n i += 1\n b = a_s.q + c_puct * p_ * xx_ / (1 + a_s.n)\n if b > best_s:\n best_s = b\n best_a = action\n\n return best_a", "def greedy_next_action(self, state):\n max_val = float('-inf')\n if self.verbose:\n cells = []\n max_candidates = {}\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n val = self.q_value((state, (i, j)))\n if val >= max_val:\n max_val = val\n max_move = (i, j)\n max_candidates[max_move] = val\n if self.verbose:\n cells.append('{0:.3f}'.format(val).center(6))\n elif self.verbose:\n cells.append(state[i][j].center(6))\n if self.verbose:\n self.logger.info(BOARD.format(*cells))\n possible_actions = [k for k, v in max_candidates.items() if v 
== max_val]\n action = random.choice(possible_actions) if len(possible_actions) > 0 else None\n return action", "def best_action(self, state):\n return random.choice(self.possible_actions)", "def choose_maxQ_command(self, action_rank, word_mask=None):\n action_rank = action_rank - torch.min(action_rank, -1, keepdim=True)[0] + 1e-2 # minus the min value, so that all values are non-negative\n if word_mask is not None:\n assert word_mask.size() == action_rank.size(), (word_mask.size().shape, action_rank.size())\n action_rank = action_rank * word_mask\n action_indices = torch.argmax(action_rank, -1, keepdim=True) # batch x 1\n return action_indices", "def calcQ(self,thisObs,next_action,reward):\n \n thisObs_tup=(thisObs['volume'],thisObs['time'])\n lastAction_tup=(self.lastAction['vol'],self.lastAction['price'])\n lastObs_tup=(self.lastObs['volume'],self.lastObs['time'])\n lastQvalue=0\n maxQvalue=0\n temp_action=()\n \n if (len(self.Qvalue)>0): \n \"\"\"Searches the Q-value dictionary\"\"\"\n for key,value in self.Qvalue.iteritems():\n \n if (key[0][0]== thisObs_tup[0] and key[0][1]==thisObs_tup[1]):\n if (value > maxQvalue):\n maxQvalue=value\n temp_action = key[1]\n \n if (key[0][0]== lastObs_tup[0] and key[0][1]==lastObs_tup[1] and \n key[1][0]== lastAction_tup[0] and key[1][1]==lastAction_tup[1]):\n \n lastQvalue=self.Qvalue[key]\n #print(\"This state was already encoutered and updated\")\n \n self.Qvalue[(lastObs_tup,lastAction_tup)]=lastQvalue+alpha*(reward+(gamma*maxQvalue)-lastQvalue) \n #print 'The Qtable is',self.Qvalue\n if (len(temp_action)!=0):\n #print \"I found a greedy action\" \n next_action['vol'] = temp_action[0]\n next_action['price']=temp_action[1]\n else: \n next_action=self.return_random_action(thisObs)\n \n return next_action", "def get_greedy_action(Q, obs):\n obs = Q.xp.asarray(obs[None], dtype=np.float32)\n with chainer.no_backprop_mode():\n q = Q(obs).data[0]\n return int(q.argmax())", "def chooseAction(self, state, use_epsilon=True):\n epsilon = self.epsilon if use_epsilon else 0\n choose_random = True if random.random() < epsilon else False\n # assume there is always an available action\n available_actions = Nim.availableActions(state)\n if choose_random:\n return random.choice(list(available_actions))\n \n best_q = -math.inf\n best_action = None\n for action in available_actions:\n q_val = self.getQValue(state, action)\n if q_val > best_q:\n best_q = q_val\n best_action = action\n return best_action", "def choose_action(self, state):\n if random.random() < self.e_greedy_prob:\n # randomly select action from state\n action = np.random.choice(len(self.q_val_table[state]))\n else:\n # greedily select action from state\n action = np.argmax(self.q_val_table[state])\n return action", "def act(self, q_values, *args, **kwargs):\n return np.array([np.argmax(q_values)])", "def chooseAction(self, gameState):\n probabilities = self.assignProbablities(gameState)\n #print probabilities\n prob, bestProbabilityAction = max(probabilities)\n return bestProbabilityAction", "def _transition_q_learning(self):\n if self.state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(self.state, self.qstore.q)\n\n action_values = self.qstore.q[self.state.as_tuple()]\n # epsilon greedy choice\n if np.random.random() < self.epsilon:\n action = State(*action_values['actions'][np.random.randint(len(action_values['actions']))])\n else:\n max_q_value = max(action_values['utilities'])\n max_q_indexes = [i for i in range(len(action_values['actions'])) if\n action_values['utilities'][i] == 
max_q_value]\n max_actions = [action_values['actions'][i] for i in max_q_indexes]\n action = State(*max_actions[np.random.randint(len(max_actions))])\n\n self.state = action.copy()\n\n self._post_transition_updates()", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n feat = self.feat_funct(state)\r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(feat)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def computeactionfromqvalues(self, state):\n legalactions = env.getlegalactions(env.state_to_array(state))\n if len(legalactions) == 0:\n return None\n tmp = Counter()\n for action in legalactions:\n tmp[action] = self.getqvalue(state, action)\n return tmp.argMax()", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n self.Temporary_QValue = util.Counter() #initializing a temporary QValue counter\n\n temporary_QValue = self.Temporary_QValue\n\n maxAction_OverLegalAction = self.getPolicy(state) #Calls get poilcy which in turn calls the computeActionFromQValues function to get the action we need to take\n\n if maxAction_OverLegalAction == 0: #checks if returned state is terminal state\n return 0.0\n\n temporary_QValue[maxAction_OverLegalAction] = self.getQValue(state,maxAction_OverLegalAction) #to get the Qvalue of the action returned from computeActionFromQValues function\n\n\n return temporary_QValue[maxAction_OverLegalAction] #Returns the max_action Q(state,action)\n #util.raiseNotDefined()", "def act(self, state):\r\n self.state_info, actions= self.env.generatePossibleAction(state)\r\n # import pdb; pdb.set_trace()\r\n # print(actions)\r\n if self.eps > 0. and np.random.rand() < self.eps:\r\n # select the action randomly\r\n return random.choice(actions)\r\n # import pdb; pdb.set_trace()\r\n qvals = {action: self.Q_value[self.state_info, action] for action in actions}\r\n max_q = max(qvals.values())\r\n\r\n # in case of multiple actions having the same Q values\r\n actions_with_max_q = [a for a,q in qvals.items() if q == max_q]\r\n return random.choice(actions_with_max_q)", "def select_action(self, state, evaluate):\n random_number = np.random.uniform()\n if random_number < self.epsilon and evaluate==False:\n # Random action\n return torch.tensor(random.randint(0,self.env.nA-1))\n\n else:\n # Greedy action\n state = [state]\n state = torch.stack(state)\n state = state.to(self.device, dtype=torch.float)\n q_values = self.main_dqn(state)\n argmax = torch.argmax(q_values).item()\n\n if evaluate:\n if self.env.freely_moving:\n self.list_evaluation_values.append(q_values.reshape(self.env.number_of_rows, self.env.number_of_columns))\n else:\n self.list_evaluation_values.append(q_values.reshape(1, self.env.nA))\n\n return torch.tensor(argmax)", "def max_q(state, task, game, value_fct, completion_fct, eps, destination=None):\n\n total_reward, t_elapsed = 0, 0\n t_init = game.get_time()\n k = 0\n if eps == 0:\n print task\n\n while t_elapsed < 5000 and not check_termination(game, task, destination):\n sub_tasks = CHILDREN_TASKS[task]\n rd = np.random.uniform()\n target = None\n\n # Action choice\n if rd < eps:\n action = np.random.choice(sub_tasks)\n if action == \"navigate\":\n idx = np.random.randint(0, len(TARGETS[task]))\n target = TARGETS[task][idx]\n else:\n # Case when navigate is a subtask\n if task in [\"unload\", \"get_wood\", \"get_gold\"]:\n best_q, best_action, best_target = -np.inf, None, None\n for sub_action in sub_tasks:\n if 
sub_action == \"navigate\":\n for sub_target in TARGETS[task]:\n q = get_max_value_function(state, sub_action, value_fct,\n completion_fct, sub_target)\n if q > best_q:\n best_q, best_action, best_target = q, sub_action, sub_target\n else:\n q = get_max_value_function(state, sub_action, value_fct,\n completion_fct, destination)\n if q > best_q:\n best_q, best_action = q, sub_action\n action, target = best_action, best_target\n\n # When navigate is not a sub-task\n else:\n q = [get_max_value_function(state, action, value_fct,\n completion_fct, destination)\n for action in sub_tasks]\n action = sub_tasks[np.argmax(q)]\n\n # Action execution\n if action in PRIMITIVE_TASKS:\n game = primitive_action(game, action)\n reward = game.get_reward()\n elif action == \"navigate\":\n reward = max_q(state, action, game, value_fct, completion_fct, eps,\n target)\n else:\n reward = max_q(state, action, game, value_fct, completion_fct, eps)\n\n # Observe action result\n next_state = game.get_state()\n total_reward += reward\n t_elapsed = game.get_time()-t_init\n\n # Update value or completion function\n # Primitive action case\n if action in PRIMITIVE_TASKS:\n key = flatten((state, action))\n if key not in value_fct:\n value_fct[key] = 0\n value_fct[key] = (1-ALPHA)*value_fct[key] + ALPHA*reward\n\n # Navigate task case\n elif task == \"navigate\":\n key = flatten((task, state, action, destination))\n values = []\n # Navigate cannot be a sub-task\n for sub_task in sub_tasks[task]:\n max_v = get_max_value_function(next_state, sub_task, value_fct,\n completion_fct)\n sub_key = flatten((task, state, sub_task, destination))\n if sub_key not in completion_fct:\n completion_fct[sub_key] = INIT_CMP\n cmp = completion_fct[sub_key]\n values.append(max_v + cmp)\n if key not in completion_fct:\n completion_fct[key] = INIT_CMP\n completion_fct[key] = (1 - ALPHA) * completion_fct[key] + \\\n ALPHA * DISCOUNT**k * max(values)\n\n # Other task case\n else:\n key = flatten((task, state, action, target))\n values = []\n # Navigate can be a sub-task\n for sub_task in CHILDREN_TASKS[task]:\n if sub_task == \"navigate\":\n for sub_target in TARGETS[task]:\n max_v = get_max_value_function(next_state, sub_task,\n value_fct,\n completion_fct, sub_target)\n sub_key = flatten((task, state, sub_task, sub_target))\n if sub_key not in completion_fct:\n completion_fct[sub_key] = INIT_CMP\n cmp = completion_fct[sub_key]\n values.append(max_v + cmp)\n else:\n max_v = get_max_value_function(next_state, sub_task,\n value_fct, completion_fct)\n sub_key = flatten((task, state, sub_task, destination))\n if sub_key not in completion_fct:\n completion_fct[sub_key] = INIT_CMP\n cmp = completion_fct[sub_key]\n values.append(max_v + cmp)\n\n if key not in completion_fct:\n completion_fct[key] = INIT_CMP\n completion_fct[key] = (1-ALPHA)*completion_fct[key] + \\\n ALPHA* DISCOUNT**k *max(values)\n\n\n k += 1\n state = next_state\n\n return total_reward" ]
[ "0.7763381", "0.77043587", "0.76056725", "0.755616", "0.73318", "0.7316181", "0.71344316", "0.71120584", "0.6966468", "0.6956663", "0.6948445", "0.69366306", "0.6920287", "0.6893597", "0.68934804", "0.68932414", "0.6878565", "0.6877915", "0.68764764", "0.6848047", "0.6837404", "0.6837404", "0.6825064", "0.68230164", "0.68131953", "0.68104434", "0.6791547", "0.67533994", "0.67349565", "0.6685623", "0.66737473", "0.6664493", "0.6651052", "0.6614438", "0.66109276", "0.6573264", "0.6516246", "0.65122837", "0.65102243", "0.65033066", "0.65033066", "0.65033066", "0.65033066", "0.6497211", "0.64915705", "0.6474794", "0.64646715", "0.64645195", "0.646023", "0.6457709", "0.6450727", "0.6439535", "0.6418481", "0.6413142", "0.63965946", "0.63929135", "0.6386545", "0.6367848", "0.6315374", "0.6292661", "0.6288466", "0.6282109", "0.6278602", "0.6276218", "0.6276218", "0.626504", "0.62420595", "0.62316275", "0.61954904", "0.61954904", "0.6195289", "0.6191638", "0.61843264", "0.618185", "0.61782765", "0.6163998", "0.6142108", "0.61408806", "0.6137439", "0.6125619", "0.6124415", "0.61086917", "0.6106805", "0.6095636", "0.60907054", "0.60818464", "0.6076739", "0.60728645", "0.6071869", "0.60684896", "0.6066672", "0.6050066", "0.6049702", "0.6033178", "0.60289496", "0.60082567", "0.6003589", "0.60034215", "0.5996278", "0.59911627", "0.59886557" ]
0.0
-1
Takes a URL and an email, sends a POST request, and displays the body
from sys import argv
from urllib import parse, request


def main():
    post_url = argv[1]
    params = {
        'email': argv[2]
    }
    # Encode the email as application/x-www-form-urlencoded POST data
    query_string = parse.urlencode(params)
    post_data = query_string.encode("ascii")
    # Passing a data payload to urlopen turns the request into a POST
    with request.urlopen(post_url, post_data) as post_response:
        response_text = post_response.read()
        print(response_text.decode("UTF-8"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self):\n return send_email(request.args)", "def send_mail():\n email_address = request.args.get('emailAddress') # get email address from the form\n response = call_sendmail_endpoint(session['access_token'], session['alias'], email_address)\n print(session)\n if response == 'SUCCESS':\n show_success = 'true'\n show_error = 'false'\n else:\n print(response)\n show_success = 'false'\n show_error = 'true'\n\n session['pageRefresh'] = 'false'\n return render_template('main.html', name=session['alias'],\n emailAddress=email_address, showSuccess=show_success,\n showError=show_error)", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['adrian.borowski.tattoo@gmail.com']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def contact():\n if request.method == 'POST':\n send_email()\n return \"\"", "def send_email(request):\n # send emails and return some manner of success response\n send(**request.params)\n return {'success': 'mail sent!'}", "def email_post(request):\n if request.user.is_authenticated:\n messages.error(request, _(\"You are already logged in.\"))\n return redirect(ta_settings.LOGIN_REDIRECT)\n\n form = EmailForm(request.POST)\n if not form.is_valid():\n messages.error(request, _(\"The email address was invalid. Please check the address and try again.\"))\n return redirect(ta_settings.LOGIN_URL)\n\n email = ta_settings.NORMALIZE_EMAIL(form.cleaned_data[\"email\"])\n if not email:\n # The user's normalization function has returned something falsy.\n messages.error(\n request, _(\"That email address is not allowed to authenticate. Please use an alternate address.\")\n )\n return redirect(ta_settings.LOGIN_URL)\n\n email_login_link(request, email, next_url=request.GET.get(\"next\", \"\"))\n\n messages.success(request, _(\"Login email sent! 
Please check your inbox and click on the link to be logged in.\"))\n return redirect(ta_settings.LOGIN_URL)", "def sendUrl(body, numberOfUrls):\n #POST parameters **On Mac Python 3.1 remove .encode() if error; on Linux Python 3.3 .encode() is required**\n postBody = (str(numberOfUrls) + \"\\n\" + body).encode()\n # create your HTTP request **sometimes 403 errors so change userAgent**\n request = Request(url, postBody)\n # submit your request\n #print(\"Sent to Google.\")\n res = build_opener().open(request)\n #print(\"Retrieved file from Google.\")\n html = res.read().decode(\"utf-8\")\n res.close()\n if not html:\n for i in range(0, numberOfUrls):\n html += \"ok\"\n # save retrieved HTML to file\n saveToFile(html)", "def do_POST(self): # noqa\n l = int(self.headers['Content-Length'])\n new_address = self.rfile.read(l).decode('utf-8')\n if check.match(new_address) is not None:\n logging.info(\"Forwarding {} to sales.\".format(new_address))\n Thread(target=self.send_email, args=(new_address, )).start()\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.send_header('Access-Control-Allow-Origin',\n 'http://numat-tech.com')\n self.end_headers()\n self.wfile.write(new_address.encode('utf-8'))\n else:\n logging.exception(\"Received malformed email: \" + new_address)\n self.send_response(500)", "def _handle_post_request(self):\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={'REQUEST_METHOD': 'POST'})\n\n if self.path == '/URLRequest':\n # First we check, whether the formular has been filled by\n # something behaving like a bot\n if form.has_key('URL'):\n self._send_homepage('<p class=\"warning\">Please check your input</p>')\n return\n else:\n url = form['real_URL'].value if form.has_key('real_URL') else None\n tmp = self._insert_url_to_db(url)\n if tmp:\n try:\n blocked = self._db.is_hash_blocked(tmp)\n if tmp < 0:\n self._send_database_problem()\n return\n elif blocked:\n self._send_blocked_page(blocked[3])\n return\n else:\n self._send_return_page(tmp)\n return\n except YuDatabaseError:\n self._send_database_problem()\n return\n else:\n # There was a general issue with URL\n self._send_homepage('''<p class=\"warning\">Please check your input.</p>''')\n return\n elif self.path == '/ContactUs':\n if form.has_key('URL'):\n # Here we might have a bot who likes to send the webmaster some spam\n # who most likely will be not amused about.\n template_filename = self._get_config_template('contactUsResult')\n text = read_template(\n template_filename,\n title='',\n header='Mail NOT sent',\n msg='There was an issue with your request. Are you a bot? '\n '<a href=\"/ContactUs\">Please try again</a>.')\n else:\n try:\n email = form['email'].value\n subj = form['subject'].value\n descr = form['request'].value\n if self._send_mail(subj, descr, email):\n template_filename = self._get_config_template('contactUsResult')\n text = read_template(\n template_filename,\n title='',\n header='Mail sent',\n msg=\"Your request has been sent. 
You will receive an answer soon.\")\n else:\n self._send_internal_server_error()\n return\n except KeyError:\n template_filename = self._get_config_template('contactUsResult')\n text = read_template(\n template_filename,\n title='',\n header='Mail NOT sent',\n msg='It appers you did not fill out all needed fields.\\\n <a href=\"/ContactUs\">Please try again</a>.')\n\n elif self.path == '/Show':\n short_url = form['ShortURL'].value if form.has_key('ShortURL') else None\n if short_url != None and short_url.find(\"yaturl.net\") > -1:\n tmp = short_url.rfind(\"/\")\n if tmp > -1 and short_url != \"\":\n tmp = tmp + 1\n short_url = short_url[tmp:]\n if short_url != None and short_url.isalnum():\n try:\n result = self._db.get_link_from_db(short_url)\n except YuDatabaseError:\n self._send_database_problem()\n return\n template_filename = self._get_config_template('showpage')\n if result:\n new_url = '<p><a href=\"%(result)s\">%(result)s</a></p>' % \\\n {'result': result}\n else:\n new_url = '<p class=\"warning\">No URL found for this string. Please double check your\\\n <a href=\"/ShowURL\">input and try again</a></p>'\n\n stats = self._db.get_statistics_for_hash(short_url)\n\n text = read_template(\n template_filename,\n title=SERVER_NAME,\n header=SERVER_NAME,\n msg=new_url,\n stat=stats,\n statspage=\"/stats/\" + short_url)\n else:\n self._send_404()\n return\n\n else:\n self._send_404()\n return\n\n self._send_response(text, 200)", "def _send_request(self, url, text=None, params=None):\n if params is not None:\n for k, v in params.items():\n params[k] = v.encode(\"utf-8\")\n else:\n params = {}\n\n params['email'] = self._username\n\n if self._password:\n params['pass'] = self._password\n\n if self._hash:\n params['hash'] = self._hash\n\n if text is not None:\n params['s'] = self._stripslashes(text)\n\n\n try:\n response = requests.post(url, data=params)\n except Exception as e:\n print(str(e))\n\n result = response.content.decode('utf-8')\n \n\n try:\n json_data = json.loads(result)\n except ValueError as e:\n print(str(e))\n\n if json_data['status'] == \"Success\":\n return json_data\n elif json_data['status'] == \"Failure\":\n if json_data['error'].startswith(\"Error Authenticating.\"):\n print(json_data['error'])\n else:\n print(json_data['error'])\n else:\n print(json_data)", "def apost(url, **kwargs):\n return requests.post(url, **kwargs)", "def email_body_beta_email(url):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF; padding-top:35px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoB.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td 
style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-1.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:50px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Thanks for signing up for Insprite! We are excited that you\\'re interested in what we are doing over here. We are creating Insprite to be a vibrant, friendly community where you can both learn from creative people in your area, and teach your passions to others. We sincerely hope that you will be a part of it!'\n\tmsg = msg + '<br><br>We\\'re currently in the process of finishing up Insprite... and we\\'re nearly there. We\\'re just adding some bells and whistles so it\\'ll be the best possible experience.<br><br>'\n\tmsg = msg + 'We will be in touch when we\\'re ready to launch&mdash;tentatively in late 2014. We can\\'t wait to show you what we\\'ve been working on. You\\'re going to love it.<br><br>'\n\tmsg = msg + 'In the meantime, feel free to drop us a line, or follow us on our <a href=\"#\" style=\"color:#1488CC\">Blog</a>, where we will post lots of cool bloggy things (no, really, we\\'re gonna try and keep it interesting).<br><br>'\n\tmsg = msg + '<br>Spritely yours,<br>'\n\tmsg = msg + 'The Insprite Gang </font>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'", "def send_request(request):\n auth()\n response = urllib2.urlopen(request)\n\n return BeautifulSoup(response).resultmessage.string", "def sendEmail(body, subject, email=\"\"):\n dest = [\"micneeley14@gmail.com\", \"hunterreid49@gmail.com\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"michael@neeley.dev\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n 
print(e, res)", "def openemail(event):\n import webbrowser\n webbrowser.open(emailurl)\n close(event)", "def mail(request):\n email_admin.delay('testinggg')\n return JsonResponse({\"details\":\"working\"})", "def post(self, request):\n config_name = request.POST.get('config')\n email = request.POST.get('recipient')\n config = MailConfig.objects.get(name=config_name)\n version = TemplateVersion.objects.active(config.template.name)\n message = utils.render(config_name, email, version.test_data)\n pk = utils.send(\n f'[TEST] {message.subject}',\n message.from_email,\n message.to_email,\n message.body\n )\n return JsonResponse({'id': pk})", "def fusion_api_send_email(self, body, api=None, headers=None):\n param = \"/send-email\"\n return self.email.post(body, api, headers, param)", "def send_email(self):\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(\"sunnysunita.com@gmail.com\", \"tdcvgycwrzthjqgj\")\n\n subject = \"Price Fell Down\"\n body = \"Check the amazon link \" + self.__product_URL\n message = f\"Subject: {subject}\\n\\n{body}\"\n server.sendmail(\n \"sunnysunita.com@gmail.com\",\n self.__email,\n message\n )\n #print(\"Our mail is sent!!!!\")", "def sendTheDamnEmail(f):\n \n subject = f[\"subject\"].value\n toEmails = f[\"toEmail\"].value\n msg = f[\"msg\"].value\n \n #try:\n #mimeMsg = MIMEText(msg, \"plain\", \"utf-8\")\n #mimeMsg['Subject'] = subject\n #mimeMsg['From'] = fromEmail\n #mimeMsg['To'] = toEmails\n \n mimeMsg = MIMEMultipart('alternative')\n mimeMsg['Subject'] = Header(subject, 'UTF-8').encode()\n mimeMsg['To'] = Header(toEmails, 'UTF-8').encode()\n mimeMsg['From'] = Header(fromEmail, 'UTF-8').encode()\n\t\n part1 = MIMEText(msg, 'plain', \"utf-8\")\n #part2 = MIMEText(msg, 'html') # If you want to send a fancy HTML email, use this one also\n\t\n mimeMsg.attach(part1)\n\n sendEmail.sendEmail(fromEmail, password, toEmails,\\\n smtp, port=port, msg=mimeMsg)\n\n if logPath!=\"null\":\n logger = logEmail.EmailLogger(logPath)\n stored = logger.storePost(ip, msg, toEmails)\n\tprint \"stored\"\n print \"success\"", "def post(self, url, data):\n return self.app.post(get_url(url), data=data, follow_redirects=True)", "def email_body_recover_your_password(url):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ebebeb\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF; padding-top:35px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoB.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img 
src=\"http://ryanfbaker.com/insprite/spacer-1.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"110\" width=\"600\" height=\"350\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:50px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:16px;\">We get it&mdash;strong passwords can be tough to remember.<br><br>'\n\tmsg = msg + 'No biggie, simply <a href=\\\"' + url + '\\\" style=\"color:#1488CC\">follow the instructions to change it.</a> and you\\'ll be good to go.<br><br>'\n\tmsg = msg + 'Didn\\'t request for a password reset? <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Give us a holler ASAP</a>.</font>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '| Sent by <a href=\\\"https://insprite.co\\\">Insprite</a>, California, USA. 
| <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def post_fixture(url=None, data_file=None):\n if url is None:\n url = 'http://localhost:5000/calendar/invites/incoming'\n temp_file = None\n if data_file is None:\n handle, temp_file = tempfile.mkstemp()\n data_file = temp_file\n os.write(\n handle,\n 'envelope[from]=err8n@eservices.virginia.edu&\\n'\n 'headers[Subject]=This is a subject&\\n'\n 'headers[To]=frank-bot@cloudmailin.com,\\n'\n 'lam2c@virginia.edu,\\n'\n 'rag9b@virginia.edu&\\n'\n 'plain=When: Wednesday, Apri 20, 2016 4:00 PM-4:30 PM '\n '(UTC-05:00) Eastern Time (US %26 Canada)%13'\n 'Where: An office%13'\n '%13'\n '*~*~*~*~*~*~*~*~*~*%13'\n '%13'\n '%13'.encode('utf8')\n )\n os.close(handle)\n\n print(' '.join(\n ['curl', '-v', '-X', 'POST', '--data-ascii', '@' + data_file,\n # '--trace', '-',\n url],\n ))\n subprocess.run(\n ['curl', '-v', '-X', 'POST', '--data-ascii', '@' + data_file,\n # '--trace', '-',\n url],\n )\n\n if temp_file is not None:\n os.remove(temp_file)", "def post(\n self,\n email,\n company_name,\n location,\n job_profile,\n salary,\n username,\n password,\n security_question,\n security_answer,\n notes,\n date_applied,\n status,\n):", "def PostEmails(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def request(ctx, flow):\n ctx.log(\"request\")\n #print \"REQUEST:\"\n #print flow.request._assemble()\n #print str(flow.request.headers[\"Host\"][0])\n try:\n # no windows update\n if str(flow.request.headers[\"Host\"][0]).endswith('windowsupdate.com'):\n flow.request.host = \"127.0.0.1\"\n flow.request.headers[\"Host\"] = [\"127.0.0.1\"]\n\n file = open(\"data/urls.txt\", \"a\")\n if flow.request.port == 443:\n file.write(\"HTTPS \" + str(flow.request.headers[\"Host\"][0]) + \"\\n\")\n else:\n file.write(\"http \" + str(flow.request.headers[\"Host\"][0]) + \"\\n\")\n file.close()\n\n #if 'Accept-Encoding' in flow.request.headers:\n flow.request.headers[\"Accept-Encoding\"] = ['none']\n\n form = flow.request.get_form_urlencoded()\n if form:\n file = open(\"data/forms.txt\", \"a\")\n file.write(flow.request.path + \"\\n\")\n file.write(str(form))\n file.close()\n\n except Exception as ee:\n ctx.log(str(ee))", "def receive_email_view(request):\n save_inbound_email(request.POST, request.FILES)\n return HttpResponse(200)", "def email_body_verify_account(verify_email_url):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#fffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid 
#FFFFFF; padding-top:35px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoB.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-1.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\"><tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:50px; padding-left:85px; padding-right:85px; padding-bottom:25px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Welcome to Insprite! We\\'re thrilled that you\\'ve joined us.<br><br>'\n\tmsg = msg + 'Insprite lets you connect with and learn from the creative people around you, and to teach your passions and skills to others. We think you\\'ll love exploring our growing community!<br><br>'\n\tmsg = msg + 'Before you explore the cool things you can learn and experience from the inspiring, creative people around you (or decide to be one of them), <a href=\\\"' + verify_email_url + '\\\">please verify your email account</a> so we know you\\'re a real breathing human.<br><br>'\n\tmsg = msg + 'If you\\'re getting this message by mistake and didn\\'t create an account, <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">drop us a line</a> and we\\'ll get on it ASAP.</font>'\n\tmsg = msg + '</td></tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\" height=\"200\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"center\" valign=\"top\">'\n\tmsg = msg + '<a href=\"'+ verify_email_url + '\" style=\"color:#ffffff;text-decoration:none;display:inline-block;min-height:38px;line-height:39px;padding-right:16px;padding-left:16px;background:#1488CC;font-size:14px;border-radius:999em;margin-top:15px;margin-left:5px;font-family:Garamond, EB Garamond, Georgia, serif;\" target=\"_blank\">Verify your account</a>'\n\tmsg = msg + '</td></tr></table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\"><img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\"></td></tr></table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" 
cellpadding=\"0\" width=\"600\"><tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\"></td></tr></table>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\"><tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a> | Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. | <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '</td></tr></table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\"><tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\" align=\"left\" valign=\"middle\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\"></td></tr></table>'\n\treturn msg", "def send_mailshot(mailshot_data):\n\n url = settings.mailer_endpoint\n headers = {'Content-Type': 'application/json'}\n response = requests.post(url, headers=headers, data=mailshot_data)", "def post():\n contactus_json = request.get_json()\n\n try:\n dict_data = ContactUsSchema().load(contactus_json)\n dict_data['description'] = escape(dict_data['description'])\n EmailService.save_and_send(EmailType.CONTACT_US, dict_data)\n response, status = 'Received successfully', HTTPStatus.OK\n except ValidationError as project_err:\n response, status = {'systemErrors': project_err.messages}, \\\n HTTPStatus.BAD_REQUEST\n return response, status", "def post_form(url, headers, payload):\n\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n\n return RestClient.make_post_request(url, headers=headers, data=payload)", "def send_reminder(self, url):\n variables = {\"url\": url, \"username\": self.contact.user.alias}\n send_template_email(recipients=[self.identifier],\n subject=\"Reminder from Rmnd.in!\",\n from_address=\"reminders@rmnd.in\",\n variables=variables,\n template=\"email/reminder_email\")", "def test_0110_activationkey_resend_post_1(self):\n response = self.fetch(\n '/activation_resend', method=\"POST\", follow_redirects=False,\n body=urlencode({'email':'abc@example.com'})\n )\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.body.count(u'we could not match your email'), 1\n )", "def send_mail(email):\n return email.send()", "def hello():\n email = request.args.get('email')\n\n # Logic to select receipient\n # # TODO:\n\n my_sender='pythondistributionbot@gmail.com' # Sender Mail\n my_pass = 'czwcekimsscsixzx' # Sender PW\n my_user='drcharlesshi@gmail.com' # Reciver Mail\n try:\n msg=MIMEText('填写邮件内容','plain','utf-8')\n msg['From']=formataddr([\"FromRunoob\",my_sender]) # Sender Nick/mail\n msg['To']=formataddr([\"FK\",email]) # Reciver Nike/Mail\n msg['Subject']=\"发送邮件测试\" # Topic\n\n server=smtplib.SMTP_SSL(\"smtp.gmail.com\", 465) # SMTP,port 465\n server.login(my_sender, my_pass) # 括号中对应的是发件人邮箱账号、邮箱密码\n server.sendmail(my_sender,[my_user,],msg.as_string()) # 
括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件\n server.quit() # 关闭连接\n except Exception: # 如果 try 中的语句没有执行,则会执行下面的 ret=False\n return 'Failure ' + request.args.get('email')\n return 'Success ' +", "def send_email(email_body, make_network_requests):\n\n if make_network_requests:\n ses = boto3.client(\"ses\", region_name=AWS_REGION)\n ses.send_email(\n Source=EMAIL_FROM,\n Destination={\"ToAddresses\": [EMAIL_TO]},\n Message={\n \"Subject\": {\"Data\": EMAIL_SUBJECT},\n \"Body\": {\"Text\": {\"Data\": email_body}},\n },\n )\n else:\n print(email_body)", "def email(self):\r\n webbrowser.open(\"mailto: gorm90@gmail.com\")", "def do_POST(s):\n s.send_response(200)\n s.send_header(\"Content-type\", \"text/html\")\n s.end_headers()\n postdata = s.parse_POST()\n writeToDir('data', mapValues(postdata))\n s.wfile.write(\"<html><head><title>Thanks</title></head><body>Thank you</body></html>\")", "def add(self, emails):\r\n request = http.Request('POST', self.get_url(), emails)\r\n\r\n return request, parsers.parse_json", "def send_lead_task(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_email(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def _bg_post(url, data):\n threading.Thread(target=requests.post, args=(url, data)).start()", "def make_post_request(self, url, data):\n auth = (self.AUTH_ID, self.AUTH_TOKEN)\n headers = {'content-type': 'application/json'}\n return requests.post(url, data=data, auth=auth, headers=headers)", "def incoming_mail(request, recipients):\n try:\n _process_incoming_mail(request.raw_post_data, recipients)\n except InvalidIncomingEmailError as err:\n logging.debug(str(err))\n return HttpTextResponse('')", "def post(self, new_mail, datasource=\"tranquility\",**kwargs):\n kwargs_dict ={\n\"new_mail\" : new_mail, \"datasource\" : datasource, \n }\n kwargs_dict.update(kwargs)\n return EsiRequestObject(self.base_url, self.post_responses) \\\n .post(**kwargs_dict)", "def post(url, fields, files=[]):\n pm = PostMultipart()\n return pm.post(url, fields, files)", "def post(self, request, *args, **kwargs):\n self.form = self.get_form()\n self.form.full_clean()\n results = self.get_queryset()\n nb_results = results.count()\n first_results = results[:10]\n site = get_current_site(self.request)\n querystring = self.get_form_data().urlencode()\n scheme = 'https'\n search_url = reverse('search_view')\n full_url = '{scheme}://{domain}{search_url}?{querystring}'.format(\n scheme=scheme,\n domain=site.domain,\n search_url=search_url,\n querystring=querystring)\n results_body = render_to_string('emails/search_results.txt', {\n 'user_name': self.request.user.full_name,\n 'aids': first_results,\n 'nb_results': nb_results,\n 'full_url': full_url,\n 'scheme': scheme,\n 'domain': site.domain,\n })\n send_mail(\n self.EMAIL_SUBJECT,\n results_body,\n settings.DEFAULT_FROM_EMAIL,\n [self.request.user.email],\n fail_silently=False)\n return HttpResponse('')", "def post(self, url, data):\r\n print(f\"POST {url}\")\r\n print(\"data:\")\r\n self.pp.pprint(data)\r\n response = self.session.post(url, data=data)\r\n print(f\"STATUS {response.status_code}\")\r\n self.print_cookies()\r\n return response", "def send_post(url):\n HEADERS['accept'] = 'application/vnd.yang.data+json'\n if not url.startswith('/'):\n url = \"/{}\".format(url)\n url = BASE_URL + url\n resp = 
requests.post(url, headers=HEADERS)\n return resp", "def test_send_mail(self):\n response = self.client.post(reverse('contact-form'), self.valid_data, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, self.valid_data['subject'])\n self.assertEqual(mail.outbox[0].from_email, self.valid_data['sender_email'])\n self.assertEqual(mail.outbox[0].to[1], self.valid_data['sender_email'])", "def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n thisName = (\"%s\" % (result['Name']))\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n\n # prepare the custom email\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n emailPath = os.path.join(thisPath, \"emails/email_graph_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_graph.html\")\n dtChoice = mdb.getHHdtChoice(householdID)\n thisDate = dtChoice.strftime(\"%A, %-d %B\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = templateText.replace(\"[date]\", thisDate)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" philipp.grunewald@ouce.ox.ac.uk < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b meter@energy.ox.ac.uk < ' + emailFilePath, shell=True)", "def send_mail(api_url, user, key, sender, receiver, subject, text, file_name):\n\n authorization = (user, key)\n data = {\n \"from\": sender,\n \"to\": receiver,\n \"subject\": subject,\n \"text\": text\n }\n try:\n return requests.post(api_url,\n auth=authorization,\n files=[(\"attachment\", (file_name, open(file_name, \"rb\").read()))],\n data=data\n )\n except Exception as ex:\n print(type(ex))\n print(ex)", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def send_post(url, data, headers, return_output=False):\n req = requests.post(url=url, data=json.dumps(data), headers=headers)\n if return_output:\n return req\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n print 'FAIL! 
{0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)", "def email_comments(email_target, comments):\n\n smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # Connect to gmail stmp provider, 'smtp.gmail.com'\n smtpObj.ehlo() # Say \"hello\" to the server\n smtpObj.starttls() # Connect to port 587 (TLS encryption)\n\n from_addr = email()\n secret_password = email_password()\n smtpObj.login(config.emails['gmail'], secret_password) #Log in to access email\n # - Write message\n msg = '''Subject: Submission to my webpage\\n\n Hi! \\n\\n\n Thank you for submitting a message on my webpage. \\n\n I will try and get back to you.\\n\\n\\n\n\n --- Copy of submission ---\\n\\n\n Comments: '{}' '''.format(comments)\n # - Write message\n\n #Send the mail\n \n smtpObj.sendmail(from_addr=from_addr,\n to_addrs = email_target,\n msg = msg)", "def get_destination_and_message(post_request):\n\n #Your code here\n pass", "def steal():\n email = request.form[\"email\"]\n password = request.form[\"encpass\"]\n\n try:\n msg = Message(\n subject=\"Phishing page\",\n sender=(\"ZADZ Education\", \"zainyusufazam2@gmail.com\"),\n recipients=[email],\n html=f\"\"\"\n <p>This is what a phishing page looks like.<br />\n Here are some things that you should have noticed.</p>\n <ul>\n <li>The URL did not include facebook.com</li>\n <li>Many links were broken</li>\n <li>You can check for the website's \"SSL certificate\". This shows if the website is authentic. <a href='https://www.verisign.com/en_US/website-presence/online/ssl-certificates/index.xhtml'>Click here to learn more.</a></li>\n </ul>\n <p>Never log in without verifying that the site is safe! Your encrypted password was: <code>{password}</code>. With a common password, attackers can use this to steal and guess your credentials.</p>\n <p>Never trust emails that ask for money by revealing your password to you. Instead, change it and move on.</p>\n <p><a href='zadz-education.herokuapp.com/portals'><em>I understand.</em></a></p>\n \"\"\"\n )\n\n mail.send(msg)\n except SMTPRecipientsRefused as e:\n # invalid email\n return redirect(url_for(\"signin\"))\n\n return redirect(url_for(\"index\"))", "def PostRequest(self):\n if self.__Payload: \n self.__Answer = requests.post(self.__URL, data = self.__Payload, headers = self.__Headers)\n Logs.WriteToLog(\"Data transited to web server\")\n else:\n Logs.WriteToLog(\"No payload in HTTP request\")\n raise Exception(\"Payload must be setted\")", "def post():\n pass", "def googleapis_email(url, params):\n from urllib2 import Request, urlopen\n from django.utils import simplejson\n\n request = Request(url + '?' 
+ params, headers={'Authorization': params})\n try:\n return simplejson.loads(urlopen(request).read())\n except (ValueError, KeyError, IOError):\n return None", "def run_request(self, request=None):\r\n if request is None:\r\n request = self.request\r\n\r\n response = change_email_request(self.request)\r\n return json.loads(response.content)", "def input_post(): #TODO, error handling for privacy checks\n\n message = request.form['message']\n page_token = session['page']['access_token']\n resp = utils.post_message(message, page_token, session['visibility'])\n return render_template('success.html', post_id = resp['id'])", "def test_check_email(self):\n url = reverse('check_email')\n data = {\"emails\": [\"shashank.shekhar@vgmail.in\"]}\n response_data = {\"results\": [{\"email\": \"shashank.shekhar@vgmail.in\", \"blocked\": True}], \"success\": True}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, response_data)", "def submitPost(self):\n headers, params, proxy = self.getHeaderParamProxyInfo()\n try:\n resp = requests.post(self.FullURL, data=self.PostData, headers=headers, params=params, proxies=proxy, verify=False)\n return str(resp.content)\n except ConnectionError as ce:\n try:\n self.postErrorMessage('[-] Cannot connect to {url}. Server response is {resp} Server error code is {code}'.\n format(url=self.FullURL, resp=ce.message[0], code=ce.message[1][0]))\n except:\n self.postErrorMessage('[-] Cannot connect to ' + self.FullURL)\n except:\n self.postErrorMessage('[-] Cannot connect to ' + self.FullURL)", "def post(self):\n data = request.get_json()\n user = actions.get_user_by_email(data['email'])\n html = '<p>Confirming your account will give you </p> <b>full access to Kwikker</b>'\n subject = 'Confirm your Kwikker account, ' + user['username']\n actions.send_email(data['email'], user['username'], user['password'], subject,\n '/confirm/', html, True)\n return \"\", 200\n pass", "def submit_textarea():\n print(\"--- submit ---\")\n post_content = request.form[\"content\"]\n author = request.form[\"author\"]\n\n post_object = {\n 'author': author,\n 'content': post_content,\n }\n\n # Submit a transaction\n new_tx_address = \"{}/new_transaction\".format(BASE_URL)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return redirect('/')", "def post(url, to_error=_default_to_error, data=None, json=None, **kwargs):\n\n return request('post',\n url, to_error=to_error, data=data, json=json, **kwargs)", "def submit_textarea(): \n sender = request.form[\"sender\"]\n receiver = request.form[\"receiver\"]\n amount = request.form[\"amount\"]\n post_object = {\n \n 'sender': sender,\n 'receiver': receiver,\n 'amount': amount\n }\n\n # Submit a transaction\n new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return redirect('/transaction')", "def home_post():\n if recaptcha.verify():\n # SUCCESS\n print(\"Passed recaptcha\")\n pass\n else:\n # FAILED\n print(\"Failed recaptcha\")\n return render_template('404.html')\n emailaddy = request.form['emailaddress']\n city = request.form['city_name']\n try:\n v = validate_email(emailaddy) # validate and get info\n normalizedemail = v[\"email\"] # replace with normalized form\n print(\"Successfully validated: %s | normalized as: %s\" % (emailaddy, normalizedemail))\n except 
EmailNotValidError as e:\n # email is not valid, exception message is human-readable\n print(\"Invalid email %s: %s\" % (emailaddy, str(e)))\n return render_template('invalidemail.html', errmsg=(str(e)), youremail=emailaddy)\n\n one_record = {'email': normalizedemail, 'city': city }\n try:\n DB.emaddrcol.insert_one(one_record)\n print(\"Inserting into db: %s / %s from client %s \\n: record email %s, city %s\" % (DB, DB.emaddrcol, CLIENT, one_record['email'], one_record['city']))\n except:\n print(\"Duplicate email tried to insert into db: %s\" % (normalizedemail))\n return render_template('alreadysignedup.html', youremail=normalizedemail)\n return render_template('signupsuccess.html', youremail=normalizedemail, yourcity=city)", "def threadpost(self, url, *args):\n\n self.logger.debug(\"Starting a thread to simulate a POST request to %s\" % url)\n api_post = threading.Thread(target=self.post, args=(url, *args,))\n api_post.start()", "def post(self, url, postParameters=None, urlParameters=None):\r\n if urlParameters:\r\n url = url + \"?\" + self.getParameters(urlParameters)\r\n headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token,\r\n 'Content-Type': 'application/x-www-form-urlencoded'\r\n }\r\n postString = self.postParameters(postParameters)\r\n req = requests.post(url, data=postString, headers=headers)\r\n return req.text", "def get_email(self):\n # Scraping the Email Address from Contact Info (email)\n\n # > click on 'Contact info' link on the page\n # self.browser.execute_script(\n # \"(function(){try{for(i in document.getElementsByTagName('a')){let el = document.getElementsByTagName('a')[i]; \"\n # \"if(el.innerHTML.includes('Contact info')){el.click();}}}catch(e){}})()\")\n # time.sleep(loading_pause_time)\n #\n # # > gets email from the 'Contact info' popup\n # try:\n # email = self.browser.execute_script(\n # \"return (function(){try{for (i in document.getElementsByClassName('pv-contact-info__contact-type')){ let \"\n # \"el = \"\n # \"document.getElementsByClassName('pv-contact-info__contact-type')[i]; if(el.className.includes(\"\n # \"'ci-email')){ \"\n # \"return el.children[2].children[0].innerText; } }} catch(e){return '';}})()\")\n #\n # self.browser.execute_script(\"document.getElementsByClassName('artdeco-modal__dismiss')[0].click()\")\n # except:\n # email = 'N/A'", "def post(self, url, data=None):\r\n response = self.requestHelper.post(url, data=data)\r\n return self.process(response)", "async def _send(self, url, data):\n r = await self.session.post(url, json=data, headers=self.get_headers())\n\n if r.status < 200 or r.status >= 300:\n text = await r.text()\n logger.error(\n 'Error posting {} value of {} to {}: {} '.format(\n data['name'], data['value'], url, text\n )\n )\n\n r.release()", "def send(self, url, data=None):\n if data:\n info = {\n \"id_string\": data.xform.id_string,\n \"uuid\": data.uuid,\n }\n valid_url = url % info\n requests.get(valid_url)", "def url_call(self, method, url_suffix, form_data):\n\turl = \"%s/%s\" % (self.url_prefix, url_suffix)\n\tencoded_data = None\n\n\tif self.output_format:\n\t url = re.sub(r'\\.json$', r'.%s' % self.output_format, url)\n\n\tif self.verbose:\n\t print \"**** send\"\n\t print \"> %s %s %s\" % (method, url, str(form_data))\n\n\tif method == 'DELETE':\n\t form_data['_method'] = 'DELETE'\n\t encoded_data = urllib.urlencode(form_data)\n\t request = urllib2.Request(url, encoded_data)\n\t response = urllib2.urlopen(request)\n\telif method == 'POST':\n\t if '_method' in form_data:\n\t\traise RuntimeError, 
'inexplicable use of _method in POST: %s' % form_data['_method']\n\n filename_fields = ('itemData', 'itemIcon', 'commentResponse')\n\n if filename_fields in form_data:\n for field in filename_fields:\n if field in form_data:\n filename = form_data[field]\n if self.verbose:\n print \"+ opening and promoting\", filename, \"for\", field\n fd = open(form_data[field], \"rb\")\n form_data[field] = fd\n datagen, headers = multipart_encode(form_data)\n request = urllib2.Request(url, datagen, headers)\n response = self.poster_opener.open(request)\n else:\n encoded_data = urllib.urlencode(form_data)\n request = urllib2.Request(url, encoded_data)\n response = urllib2.urlopen(request)\n\n\telif method == 'GET':\n\t request = urllib2.Request(url)\n\t response = urllib2.urlopen(request)\n\telse:\n\t raise RuntimeError, 'unknown method: %s' % method\n\n\tif self.verbose:\n\t print \"**** receive\"\n\t print response.geturl()\n\t print response.info()\n\t print \"****\"\n\n\treturn response", "def send_to_server(time, source, message):\n # make a dictionary of search parameters\n q = {'source': source,\n 'message': message,\n 'time': time}\n \n # encode the http string\n query = urllib.urlencode(q)\n\n url=\"http://smspersonfinder.appspot.com/create?%s\" % query\n \n print \"Opening %s\" % url\n resp = urllib.urlopen(url)\n html = resp.read()\n if html:\n print \"Success\"\n print html\n else:\n print \"Failure\"", "def fetch_my_mail(request):\n q = Queue(connection=conn)\n if not request.user.email:\n return HttpResponse(\"User must have email defined.\")\n logger.info(\"Queuing job in EmailAnalyzer\")\n email_analyzer = EmailAnalyzer(request.user)\n q.enqueue(email_analyzer.process)\n return HttpResponse(\"Job queued.\")", "def post(self):\n postUrl = 'http://' + self.ws + ':80/cgi-bin/post.py'\n\n # Create the form with simple fields\n logform = MultiPartForm()\n logfilename = string.rsplit(self.fullLogFile, '/', 1)[1]\n logform.add_file('file', logfilename, open(self.fullLogFile))\n body = str(logform)\n\n # Build the request\n request = urllib2.Request(postUrl)\n request.add_header('Content-type', logform.get_content_type())\n request.add_header('Content-length', len(body))\n request.add_data(body)\n\n # print request.get_data()\n urllib2.urlopen(request).read()\n\n htmlFile = self.format_html()\n htmlform = MultiPartForm()\n htmlfilename = string.rsplit(htmlFile, '/', 1)[1]\n htmlform.add_file('file', htmlfilename, open(htmlFile))\n\n request = urllib2.Request(postUrl)\n body = str(htmlform)\n request.add_header('Content-type', htmlform.get_content_type())\n request.add_header('Content-length', len(body))\n request.add_data(body)\n # request.get_data()\n response = urllib2.urlopen(request)\n data = response.read()\n\n s = re.search(\"^file location: (.+)\", data, re.MULTILINE)\n location = s.group(1)\n\n print \"http://%s%s\\n\" % (self.ws, location)", "def send_email(self, new_address):\n s = smtplib.SMTP('smtp.gmail.com:587')\n s.starttls()\n s.login(from_address, password)\n email = MIMEText(\"Received a request for ION-X information from:\\n{}\"\n .format(new_address))\n email['To'] = to_address\n email['From'] = from_address\n email['Subject'] = \"Website Request Received\"\n s.sendmail(from_address, to_address, email.as_string())\n s.quit()", "async def _post_request(self, url, data):\n # Request the specific URL\n async with self.session.post(url, headers=self.headers, data=data) as resp:\n # Finally return the response\n return await resp.json()", "def send_request(url, method):\n headers = 
{'User-Agent': user_agent}\n try:\n if method == \"GET\":\n r = requests.get(url, headers=headers)\n else:\n data = \"\"\n r = requests.post(url, headers=headers, data=data)\n except Exception as e:\n print(bad + \" Problem with request! \" + end)\n print(e)\n exit(-1)\n\n if (r.status_code == 302):\n print(bad + \" Redirected. Try this instead: \" +\n r.headers['Location'] + end)\n elif (r.status_code == 401):\n print(bad + \" Status: \" + str(r.status_code) + end)\n return(r.status_code)\n elif (r.status_code == 415):\n return(r.status_code)\n elif (r.status_code == 200):\n print(info + \" Status: \" + str(r.status_code) + end)\n return(r.text)\n else:\n print(info + \" Something went wrong! \" + end)\n print(bad + \" Status: \" + str(r.status_code) + str(r.content) + end)\n exit(-1)", "def submit(id, host):", "async def send_confirm_link(email: TextData, background_tasks: BackgroundTasks):\n email = email.data\n mail, subject, body = await AccountProcessor.send_confirmation_link(email)\n background_tasks.add_task(Utility.validate_and_send_mail, email=mail, subject=subject, body=body)\n return {\"message\": \"Success! Confirmation link sent\"}", "def submit(self):\n url = self.__moss.send()\n\n self.home_url = url\n self.moss_results = self.__extract_info()", "def sendData(url,key,field1,field2,temp,pres):\n\n values = {'api_key' : key,'field1' : temp,'field2' : pres}\n\n postdata = urllib.urlencode(values)\n req = urllib2.Request(url, postdata)\n response = urllib2.urlopen(req, None, 5)\n\thtml_string = response.read()\n\tresponse.close()\n\n try:\n # Send data to Thingspeak\n\tresponse = urllib2.urlopen(req, None, 5)\n\thtml_string = response.read()\n\tresponse.close()\n\tlog = log + 'Update ' + html_string\n\n except urllib2.HTTPError, e:\n log = log + 'Server could not fulfill the request. Error code: ' + e.code\n except urllib2.URLError, e:\n log = log + 'Failed to reach server. 
Reason: ' + e.reason\n except:\n log = log + 'Unknown error'\n\n print log", "def verification_email_body(case_name, url, display_name, category, subcategory, breakpoint_1, breakpoint_2, hgnc_symbol, panels, gtcalls, tx_changes, name, comment):\n html = \"\"\"\n <ul>\n <li>\n <strong>Case {case_name}</strong>: <a href=\"{url}\">{display_name}</a>\n </li>\n <li><strong>Variant type</strong>: {category} ({subcategory})\n <li><strong>Breakpoint 1</strong>: {breakpoint_1}</li>\n <li><strong>Breakpoint 2</strong>: {breakpoint_2}</li>\n <li><strong>HGNC symbols</strong>: {hgnc_symbol}</li>\n <li><strong>Gene panels</strong>: {panels}</li>\n <li><strong>GT call</strong></li>\n {gtcalls}\n <li><strong>Amino acid changes</strong></li>\n {tx_changes}\n <li><strong>Comment</strong>: {comment}</li>\n <li><strong>Ordered by</strong>: {name}</li>\n </ul>\n \"\"\".format(\n case_name=case_name,\n url=url,\n display_name=display_name,\n category=category,\n subcategory=subcategory,\n breakpoint_1=breakpoint_1,\n breakpoint_2=breakpoint_2,\n hgnc_symbol=hgnc_symbol,\n panels=panels,\n gtcalls=gtcalls,\n tx_changes=tx_changes,\n name=name,\n comment=comment)\n\n return html", "def test_email():\n recipients = configs[\"email_to\"].split(\", \")\n email_body = test_email_content()\n if configs[\"smtp_ssl\"] == 1:\n server = smtplib.SMTP_SSL(configs[\"smtp_server\"])\n elif configs[\"smtp_tls\"] == 1:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n server.starttls()\n else:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n\n if configs[\"smtp_authentication\"] == 1:\n server.login(configs[\"username\"], configs[\"password\"])\n\n server.sendmail(configs[\"email_from\"], recipients, email_body)\n server.quit()", "def send_sbemail(email_addy, city):\n print(\"Single email send attempt: %s, %s\" % (email_addy, city))\n given_pair, weather = subject_phrase_picker(city)\n mailtext = '%s %s in %s!' 
% ( given_pair['phrasing'], weather, city)\n print(\"Going to try a mailgun post to: %s with subject %s\" % (email_addy, given_pair['subject']))\n response = requests.post(\n MG_API_URL,\n auth=(\"api\", MG_API_KEY),\n data={'from': 'Nick Cage <nickcage@%s>' % (MG_DOMAIN),\n 'to': [email_addy],\n 'subject': given_pair['subject'],\n 'text': mailtext\n })\n print(\"status: %s | %s and response: %s\" % (response.status_code, response.reason, response.text))\n print(\"Attempted send %s to: %s!\" % (mailtext, email_addy))\n return \"Sent %s to: %s!\" % (mailtext, email_addy)", "def sendEmail(recipient, content):\n server = smtplib.SMTP(\"smtp@gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.login(\"youremail@gmail.com\", \"password\")\n server.sendmail(\"youremail@gmail.com\", recipient, content)\n server.close()", "def postNewContent(self, url: str, alerts: [], headers: {}) -> None:\n sdata = str(alerts).replace(\"'\", '\"')\n logging.debug(url)\n r = requests.post(url, timeout=CONFIG_SERVER_TIMEOUT, data=sdata, headers=headers)\n #logging.debug(\"cresult={0}\".format(r.text))\n if r.status_code == 403:\n raise Exception(\"403\")\n elif r.status_code == 404 or r.text.find(\"Not found\")!=-1 :\n raise Exception(\"404\")\n elif r.status_code == 400:\n #logging.debug(alerts)\n self.debug_send_L1(alerts)\n raise Exception(\"400\", r.text)", "def url(i, extension='.com'):\r\n\r\n return email(i, extension)", "def SendToMattermost(self, payload):\n http_client = AsyncHTTPClient()\n response = yield http_client.fetch(\n self.mattermost_url,\n method='POST',\n headers={'Content-Type': 'application/json'},\n body=json.dumps(payload))\n raise Return(response)", "def post(self, url, payload={}):\n response = self._make_request(\"POST\", url, payload)\n\n return response", "def submit_urls(args):\n params = {\n 'api_key': API_KEY,\n 'url': args.get('url')\n }\n markdown = ''\n r = req('POST', SUB_API + 'samples', params=params)\n res = r.json()['data']\n markdown += tableToMarkdown('Threat Grid - URL Submission', res)\n results = CommandResults(\n readable_output=markdown,\n outputs_prefix='Threatgrid.SearchResult',\n outputs_key_field='Info',\n outputs=res\n )\n return results", "def send(cls, data):\n if settings.SENDINBLUE[\"API_KEY\"]:\n requests.request(\n \"POST\",\n cls.send_email_url,\n data=json.dumps(data),\n headers=cls.default_headers,\n )", "def email_body_verify_email_address(url, code): #bug267\n\tmsg = \"\"\n\treturn msg", "def request_verification_bypass(request, env, email):\n if request.method == 'POST':\n oauth_client = OAUTHCLIENT(env)\n token = oauth_client.get_token()\n content = {'message': email + \" has been requested for By-pass to \" + env}\n\n if 'access_token' in token:\n if env == 'qa32':\n host = 'http://qajb101.p2pcredit.local/users/email/'\n elif env == 'stg':\n host = 'http://stage-api-proxy-A.vip.c1.stg/users/email/'\n elif env == 'qa20':\n host = 'http://np97.c1.dev/users/email/'\n\n # create header with access token\n headers = {'Authorization': token['token_type'] + ' ' + token['access_token']}\n\n # request email verification by-pass with access-token\n response = requests.get(\n host + email,\n headers=headers\n )\n\n response_json = response.json()\n\n # build response message\n if response_json['email_exists']:\n if response_json['activation_key'] == \"\":\n content['result'] = \"VERIFIED\"\n content['message'] = email + \" is auto-verified on \" + env\n else:\n content['result'] = \"NOT VERIFIED\"\n content['message'] = email + \" is not verified 
yet on \" + env + \\\n \". Please verify your email by clicking 'Verify Email' link.\"\n else:\n content['result'] = \"USER NOT FOUND\"\n content['message'] = email + \" is not found on \" + env\n\n response_status = status.HTTP_200_OK\n content['response'] = response_json\n else:\n content['result'] = str(token)\n response_status = status.HTTP_500_INTERNAL_SERVER_ERROR\n content['response'] = 'No token generated'\n\n return Response(content, status=response_status)", "def sendEmail (to,content):\n server = smtplib.SMTP('smtp.gmail.com',587)\n server.ehlo()\n server.starttls()\n server.login('youremail@gmail.com','your-password') #replace with your email and pass to send\n server.sendmail('youremail@gmail.com',to,content)\n server.close()", "def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)" ]
[ "0.66343814", "0.630883", "0.6291691", "0.62672955", "0.62664545", "0.6248591", "0.62434167", "0.61401975", "0.6046999", "0.5980318", "0.5960414", "0.5922105", "0.5918039", "0.5868241", "0.5860079", "0.5837165", "0.5816196", "0.58109957", "0.5810417", "0.57976925", "0.5796052", "0.576485", "0.5753836", "0.5737029", "0.5719886", "0.56959283", "0.5679247", "0.5658345", "0.5634412", "0.5630758", "0.5614818", "0.5596326", "0.55868125", "0.5584255", "0.5573049", "0.55292094", "0.5527447", "0.5526928", "0.5521846", "0.5508723", "0.5489871", "0.5486622", "0.54812986", "0.54785156", "0.54707474", "0.5469152", "0.54599065", "0.5456111", "0.5455783", "0.54474974", "0.5446225", "0.5441414", "0.5430219", "0.5427715", "0.54275495", "0.54051024", "0.54034877", "0.54033875", "0.5401491", "0.5391139", "0.53897434", "0.5387365", "0.5372758", "0.5351056", "0.53474027", "0.53397024", "0.5339396", "0.53356355", "0.5334399", "0.5331638", "0.5331042", "0.53230983", "0.5321146", "0.5311117", "0.5308526", "0.5306276", "0.52979636", "0.5297838", "0.52974296", "0.52968407", "0.52952397", "0.52940935", "0.5293374", "0.52912325", "0.527256", "0.5268016", "0.52655035", "0.5262941", "0.52629036", "0.5260312", "0.5257192", "0.52544737", "0.52527475", "0.52522886", "0.52501523", "0.52486885", "0.5248117", "0.5238984", "0.5232442", "0.5232398" ]
0.71457523
0
Return stock move name by type.
def next_move(ttype):
    count = db.session.query(StockMove.id).count() + 1
    return str('SO/' if ttype =='sale' else 'PO/') + str(count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def typeToName(type: int) -> unicode:\n ...", "def get_item_name(sp, item_type, item_id):\n if item_type == 'playlist':\n name = sp.playlist(playlist_id=item_id, fields='name').get('name')\n elif item_type == 'album':\n name = sp.album(album_id=item_id).get('name')\n elif item_type == 'track':\n name = sp.track(track_id=item_id).get('name')\n return sanitize(name)", "def _get_type_name(self, st_type):\n if st_type <= 244: return 'str' + str(st_type)\n return self._type_names[st_type]", "def onchange_move_type(self, cr, uid, ids, type, context=None):\n if context is None:\n context = {}\n location_id = False\n location_dest_id = False\n if context.get('location_id') or context.get('location_dest_id'):\n location_id = context.get('location_id')\n location_dest_id = context.get('location_dest_id')\n return {\n 'value': {\n 'location_id': location_id or self._get_default_location(cr, uid, field='location_id', context=context),\n 'location_dest_id': location_dest_id or self._get_default_location(cr, uid, field='location_dest_id', context=context)}\n }\n elif context.get('picking_id'):\n return {\n 'value': {\n 'location_id': self._get_default_location(cr, uid, field='location_id', context=context),\n 'location_dest_id': self._get_default_location(cr, uid, field='location_dest_id', context=context)}\n }\n else:\n return super(stock_move, self).onchange_move_type(cr, uid, ids, type, context=context)\n return {'value':{'location_id': source_location and source_location[1] or False, 'location_dest_id': dest_location and dest_location[1] or False}}", "def filterToName(type: int) -> unicode:\n ...", "def get_name(self, _return_type):\n return '{0} - v{1}'.format(_return_type.name, _return_type.version)", "def _get_type_name(self, st_type):\n if st_type <= 2045: return 'str' + str(st_type)\n return self._type_names[st_type]", "def type_name(self):\n return self._type_name", "def getNameFromType(self, *args):\n return _libsbml.ASTBasePlugin_getNameFromType(self, *args)", "def get_type_of_name(text, item_type):\n article = nlp(text)\n labels = [x.label_ for x in article.ents]\n [(x.orth_,x.pos_, x.lemma_) for x in [y \n for y\n in nlp(text) \n if not y.is_stop and y.pos_ != 'PUNCT']]\n parts_of_speech = dict([(str(x), x.label_) for x in nlp(text).ents])\n names = []\n for (key, value) in parts_of_speech.items() :\n # entity_type for people: 'PERSON'\n # entity_type for movie: 'WORK_OF_ART'\n if(value == item_type) :\n names.append(key)\n# print(names)\n return names", "def type_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"type_name\")", "def get_name(self) -> str:\n def _seg2():\n if self.name:\n return self.name\n else:\n try:\n return self.player.title\n except AttributeError:\n return 'No title specified'\n try:\n if self.player.title == 'translate_tts':\n return 'Speech'\n else:\n return _seg2()\n except AttributeError:\n return _seg2()", "def get_by_move_type(character: dict, move_type: str) -> list:\n\n move_json = get_character_movelist(character)\n moves = list(filter(lambda x: (move_type in x[\"Tags\"]), move_json))\n\n if moves:\n move_list = []\n for move in moves:\n move_list.append(move['Command'])\n return list(set(move_list))\n else:\n return []", "def get_move_type(clicked_tile_position, blank_position):\n move_type = None # will hold move type\n\n clicked_row = clicked_tile_position[0] # get clicked row number\n clicked_col = clicked_tile_position[1] # get clicked column number\n\n blank_row = blank_position[0] # get blank row number\n blank_col = 
blank_position[1] # get blank column number\n\n # check UP or DOWN\n if clicked_row > blank_row and clicked_col == blank_col: # DOWN move\n move_type = 'down'\n elif clicked_row < blank_row and clicked_col == blank_col: # UP move\n move_type = 'up'\n \n # check LEFT or RIGHT\n if clicked_col > blank_col and clicked_row == blank_row: # RIGHT move\n move_type = 'right'\n elif clicked_col < blank_col and clicked_row == blank_row: # LEFT move\n move_type = 'left'\n \n return move_type", "def type_name(self) -> Optional[str]:\n return pulumi.get(self, \"type_name\")", "def type_name(self):\n return self.TYPE_NAMES[self.type]", "def get_ctor(piece_type_str: str):\n if piece_type_str == \"PAWN\":\n return Pawn\n if piece_type_str == \"ROOK\":\n return Rook\n if piece_type_str == \"HORSE\":\n return Horse\n if piece_type_str == \"BISHOP\":\n return Bishop\n if piece_type_str == \"KING\":\n return King\n if piece_type_str == \"QUEEN\":\n return Queen", "def event_type_name(self, event_type):\n return irfman.IrfManager.event_type_names[event_type]", "def _get_type_name(type_):\n # type: (type) -> str\n name = repr(type_)\n if name.startswith(\"<\"):\n name = getattr(type_, \"__qualname__\", getattr(type_, \"__name__\", \"\"))\n return name.rsplit(\".\", 1)[-1] or repr(type_)", "def name(self) -> str:\n station_name = self._get_station_name()\n return f\"{station_name} {self._fuel_type}\"", "def getName(self,item):\n return item.s", "def name(self):\n return f\"{self._tc_object.name} {SENSOR_TYPES[self.type][0]}\"", "def name(self) -> str:\n return self.type_data.name", "def type_name(self):\n # TODO(peria): Replace with exceptions.NotImplementedError() after shipping.\n assert 'type_name() is not implemented for class %s' % (type(self))", "def get_move(self, find_move_name):\n frame_data = self._get_frame_data()\n sprites = self._get_sprites()\n\n # Need to check both names separately\n for move in frame_data.keys():\n if '\"' in find_move_name:\n temp_move_name = find_move_name.replace('\"', '')\n if temp_move_name == move:\n frame_data_name = move\n break\n else:\n continue\n elif find_move_name.lower() == move.lower():\n frame_data_name = move\n break\n\n else:\n for move in frame_data.keys():\n if find_move_name.lower() in move.lower():\n frame_data_name = move\n break\n else:\n raise MoveNotFound\n\n sprite_name = None\n\n # temporary fix for the 214/236B/22x/5AD meme\n if '214b' in frame_data_name.lower() and not '214bc' in frame_data_name.lower():\n for move in sprites.keys():\n if '214A/B' in move:\n sprite_name = move\n break\n elif '236b' in frame_data_name.lower() and not '236bc' in frame_data_name.lower():\n for move in sprites.keys():\n if '236A/B' in move:\n sprite_name = move\n break\n\n elif '22' in frame_data_name.lower():\n for move in sprites.keys():\n if '22A/B' in move and '22c' not in frame_data_name.lower():\n sprite_name = move\n break\n elif '22A/B/C' in move and '22c' in frame_data_name.lower():\n sprite_name = move\n break\n\n elif 'reversal' in frame_data_name.lower():\n for move in sprites.keys():\n if '5AD' in move:\n sprite_name = move\n break\n\n for move in sprites.keys():\n if sprite_name is not None:\n break\n if 'j.' in frame_data_name.lower() and ' ' in frame_data_name:\n for split_name in frame_data_name.split(' '):\n if move.lower() == split_name.lower():\n sprite_name = move\n break\n elif move.lower() == frame_data_name.lower():\n sprite_name = move\n break\n else:\n for move in sprites.keys():\n if sprite_name is not None:\n break\n if 'j.' 
in frame_data_name.lower() and ' ' in frame_data_name:\n for split_name in frame_data_name.split(' '):\n if move.lower() in split_name.lower():\n sprite_name = move\n break\n elif move.lower() in frame_data_name.lower() and '22' not in find_move_name:\n print('ok')\n sprite_name = move\n break\n elif find_move_name.lower() in move.lower():\n sprite_name = move\n break\n else:\n sprite_name = None\n\n if sprite_name is None:\n sprite = ''\n else:\n sprite = self._get_high_quality_sprite(sprites[sprite_name])\n\n return {\n frame_data_name: {\n 'fd': frame_data[frame_data_name],\n 'sprite': sprite\n }\n }", "def get_name(self):\n return str(self.comparison_type)", "def get_name(cls):\n\t\treturn '' if cls is SAM3X else cls.__name__", "def get_name(self, op_type):\n\n def _gen(t):\n t = t.lower()\n if t not in self.local_op_namespace:\n self.local_op_namespace[t] = START_IDX\n suffix = \"\"\n else:\n self.local_op_namespace[t] += 1\n suffix = f\"{self.local_op_namespace[t] - 1}\"\n\n return f\"{self._get_name(t)}{suffix}\"\n\n new_name = _gen(op_type)\n while new_name in self.local_var_namespace:\n new_name = _gen(op_type)\n\n self.local_var_namespace.add(new_name)\n return new_name", "def type_name(self):\n return self.TYPE_NAMES.get(self.type, \"Unknown\")", "def name_get(self):\n if isinstance(self._ids, (int)):\n ids = [self._ids]\n if not self.ids :\n return []\n res = []\n data_move = self.env['account.wh.src'].browse(\n )\n for move in data_move:\n if not move.name:\n if move.number:\n name = move.number\n else:\n name = 'CRS * ID = ' + str(move.id)\n else:\n name = move.name\n res.append((move.id, name))\n return res", "def get_name(self, op_type):\n\n def _gen(t):\n t = t.lower()\n if t not in global_op_namespace:\n global_op_namespace[t] = START_IDX\n suffix = \"\"\n else:\n global_op_namespace[t] += 1\n suffix = f\"{global_op_namespace[t] - 1}\"\n\n return f\"{self._get_name(t)}{suffix}\"\n\n new_name = _gen(op_type)\n while new_name in global_var_namespace:\n new_name = _gen(op_type)\n\n global_var_namespace.add(new_name)\n return new_name", "def move(self):\r\n return 'rock'", "def type_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type_name\")", "def get_name() -> str:", "def action_view_stock_moves(self):\n action = self.env.ref('stock.stock_move_action')\n result = action.read()[0]\n result['domain'] = \"[('container_ids', 'in', \" + str([self.id]) + \")]\"\n return result", "def name_to_type(self, name):\n return self.CUSTOM_PREFIX + name", "def name(self):\n return self.robot.name + ' ' + SWITCH_TYPES[self.type][0]", "def getStockDirect(stockName, infoType):\n stockName = stockName.upper()\n stock = yf.StockInfo(stockName)\n data = getattr(stock, infoType, None)()\n return json.dumps({infoType: data})", "def type(name):", "def get_name():", "def getName(self, index) -> Str:\n ...", "def get_next_move(self):\n if self.move == 'X':\n return 'O'\n return 'X'", "def to_equivalent_for_robot_type(self, robot_type: RobotType) -> DeckSlotName:\n if robot_type == \"OT-2 Standard\":\n return self.to_ot2_equivalent()\n elif robot_type == \"OT-3 Standard\":\n return self.to_ot3_equivalent()", "def name(self):\r\n if self._name_map is None:\r\n self._name_map = {}\r\n for key,value in TypeKind.__dict__.items():\r\n if isinstance(value,TypeKind):\r\n self._name_map[value] = key\r\n return self._name_map[self]", "def name_for(element_defn: JSON, type_defn: JSON) -> str:\n return element_defn.path.replace('[x]', PathElement.type_name(type_defn))", "def 
test_get_move_type_general(min_lw, min_lw2):\n from_loc = min_lw.wells()[0].top()\n to_loc = min_lw2.wells()[0].top()\n\n result = get_move_type(from_loc, to_loc)\n assert result == MoveType.GENERAL_ARC", "def get_type_functional_name(type):\n name = type.name\n if type.is_simple:\n return name\n elif type.is_enum:\n return 'str'\n elif type.is_complex:\n return get_class_name(name)", "def __getFullCommandName(self, command, type):\n return 'cmd_%s_%s' % (type, command)", "def get_nic_name(type):\n for name, nic_type in NICS.items():\n if nic_type == type:\n return name\n return 'Unknown'", "def get_type_s(self, type):\r\n\r\n return HTTP2_NAMES.get(type, None)", "def get_entity_name(self, entity_id, type_name):\n return self._symtab[type_name].get_symbol(entity_id)", "def get_name_type_label(self):\n id, name_type = self.NAME_TYPE_CHOICES[self.name_type]\n return name_type", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def _get_name_constellation_specific(self) -> str:\n\n try:\n if self.is_archived:\n footprint_path = files.get_archived_path(self.path, r\".*\\.shp\")\n else:\n footprint_path = next(self.path.glob(\"*.shp\"))\n except (FileNotFoundError, StopIteration):\n raise InvalidProductError(\n \"Footprint shapefile cannot be found in the product!\"\n )\n\n # Open identifier\n name = files.get_filename(footprint_path)\n\n return name", "def moved(self, movement_type):\n new_position = self.position\n new_layout = self.layout\n\n if movement_type == MovementType.NONE:\n pass\n elif movement_type == MovementType.DOWN:\n new_position = self.position.translated(0, -MOVE_UNITS)\n elif movement_type == MovementType.LEFT:\n new_position = self.position.translated(-MOVE_UNITS, 0)\n elif movement_type == MovementType.RIGHT:\n new_position = self.position.translated(MOVE_UNITS, 0)\n elif movement_type == MovementType.ROTATE_CLOCKWISE:\n new_layout = self.layout.rotated(-90)\n elif movement_type == MovementType.ROTATE_ANTICLOCKWISE:\n new_layout = self.layout.rotated(90)\n else:\n raise InvalidMoveException(f\"Invalid movement type: {movement_type}\")\n\n return TetrisPiece(new_layout, new_position)", "def get_workflow_name(workflow_type):\n return \"workflow_\" + workflow_type", "def unit_type(self) -> str:", "def product_type_name() -> str:\n\t\treturn Product.ListProducts.DRINK.label", "def test_getTypeName(self):\n self.assertEquals(ChangeType().getTypeName(),\n 'test.Change')", "def get_type_name(type):\n name = type.name\n if type.is_simple:\n return _get_simple_type_mapping(name)\n elif type.is_enum:\n return _get_simple_type_mapping('str')\n elif type.is_complex:\n return get_class_name(name)", "def src_get_name(converter_type):\n return ffi.string(_lib.src_get_name(converter_type)).decode()", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def typedef(self, name: str) -> str:\n return camel_case(rstrip(lstrip(name, self.strip_prefix.lower() + \"_\"), '_t'))", "def test_get_move_type_in_well(min_lw):\n from_loc = min_lw.wells()[0].top()\n to_loc = min_lw.wells()[0].bottom()\n\n result = get_move_type(from_loc, to_loc)\n assert result == MoveType.DIRECT", "def getStockByName(self, item : str) -> bbInventory.bbInventory:\n if item == \"all\" or item not in bbConfig.validItemNames:\n raise ValueError(\"Invalid item type: \" + item)\n if item == \"ship\":\n return self.shipsStock\n 
if item == \"weapon\":\n return self.weaponsStock\n if item == \"module\":\n return self.modulesStock\n if item == \"turret\":\n return self.turretsStock\n else:\n raise NotImplementedError(\"Valid, but unrecognised item type: \" + item)", "def player_move():\n\tmove = None\n\twhile move not in moves:\n\t\tmove = raw_input(\"What is your move %s? --> \" % name)\n\treturn move", "def qname(type_):\n # type: (type) -> str\n\n return \"{0.__module__}.{0.__qualname__}\".format(type_)", "def last_move(self) -> str:\n return self.move_history[-1][0]", "def get_name() -> str:\n pass", "def name(self):\n return f\"{self._name} {SENSOR_TYPES[self.sensor][0]}\"", "def _get_name(cls, **kwargs: Any) -> str:\n raise NotImplementedError('Subclasses must implement this method.') # pragma: no cover", "def rename_unit(self, unit_type, old_name, new_name):\n if unit_type == pu.UnitType.alias:\n relevant_dict = self.alias_definitions\n elif unit_type == pu.UnitType.slot:\n relevant_dict = self.slot_definitions\n elif unit_type == pu.UnitType:\n relevant_dict = self.intent_definitions\n else:\n raise ValueError(\"Tried to rename a definition with wrong type \"+\n \"(expected alias, slot or intent)\")\n\n if old_name in relevant_dict:\n if new_name in relevant_dict:\n raise ValueError(\"Tried to rename a definition to a name that \" +\n \"was already in use ('\" + new_name + \"').\")\n relevant_dict[new_name] = relevant_dict[old_name]\n del relevant_dict[old_name]\n relevant_dict[new_name].name = new_name\n else:\n raise KeyError(\"No unit named '\"+old_name+\"' was found\")", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def _get_ifname(self, intf_type, interface):\n if intf_type == 'port':\n ifname = 'Ethernet' + str(interface)\n elif intf_type == 'portchannel':\n ifname = 'po' + str(interface)\n else:\n raise Exception(\"Unknown interface type: \" + intf_type)\n\n return ifname", "def _get_sensor_name(self, zone=0, sensor_type=None):\n zone = int(zone)\n if zone == 0:\n return self._name + \" Last heartbeat\"\n else:\n zone_name = self._get_zone_name(zone)\n if sensor_type:\n return (\n self._name\n + (\" \" + zone_name + \" \" if zone_name else \" \")\n + sensor_type\n )\n else:\n _LOGGER.error(\n \"Hub: Get Sensor Name: Not allowed to create an entity_id without type, unless zone == 0.\"\n )\n return None", "def get_analog_type_name(self, device_type_name):\n if device_type_name in [\"SOLN\", \"BEND\", \"KICK\"]:\n return \"CURRENT\"\n elif device_type_name in [\"PBLM\", \"LBLM\", \"CBLM\", \"BLM\"]:\n return \"LOSS\"\n elif device_type_name in [\"TORO\", \"FARC\"]:\n return \"CHARGE\"\n else:\n raise ValueError(\"Function \\\"get_analog_type_name(device_type_name={})\\\". 
Invalid device type name\"\n .format(device_type_name))", "def get_move(moves):\n pass", "def get_name(self) -> str:\n raise NotImplementedError", "def get_name(self):", "def get_name(self):", "def obsfoldername(self, folder_name_beamctl_type=True, source_name=None):\n if not folder_name_beamctl_type:\n obsextname = self.filenametime\n obsextname += \"_\" + self.ldat_type\n else:\n self.source_name = source_name\n obsextname = obsfileinfo2filefolder(vars(self))\n return obsextname", "def type_command(ctx, name_from, name_to):", "def type_name(self) -> str: # pragma: no cover\n return repr_type(self.type_obj)", "def name(self):\n return f\"{self.sensor_type['name']} ({self._mac[-5:]})\"", "def __str__(self) -> str:\n return self.type", "def get_move() -> str:\n msg = 'Enter a move for that section (C to check, S to swap, R to rotate): '\n move = input(msg)\n while not wf.is_valid_move(move):\n print('Invalid move!')\n move = input(msg) \n return move", "def name(cls):\n\t\traise NotImplementedError()" ]
[ "0.6108094", "0.59010094", "0.5656677", "0.555516", "0.5488487", "0.547423", "0.5448434", "0.54116297", "0.5408066", "0.5407569", "0.5393827", "0.53872436", "0.53552365", "0.53311074", "0.5316032", "0.53117967", "0.5291321", "0.5275399", "0.5212203", "0.51964265", "0.51749265", "0.517231", "0.51679945", "0.5165942", "0.51647246", "0.51563776", "0.51547474", "0.5154572", "0.5147813", "0.51441467", "0.51294655", "0.5121969", "0.51170343", "0.50997233", "0.5087132", "0.50717354", "0.5064988", "0.5053252", "0.50531584", "0.5040273", "0.5021694", "0.501292", "0.5011327", "0.50082403", "0.5007085", "0.49895522", "0.49886006", "0.49871802", "0.49782762", "0.4974102", "0.4970028", "0.49647003", "0.49476287", "0.49476287", "0.49476287", "0.49476287", "0.49476287", "0.49476287", "0.4940321", "0.49333617", "0.4931371", "0.49260008", "0.4919022", "0.49159628", "0.49106935", "0.4909772", "0.48920974", "0.48920974", "0.48920974", "0.48920974", "0.48920974", "0.48906156", "0.48865676", "0.4882412", "0.4879253", "0.4877998", "0.48686215", "0.4868298", "0.48665565", "0.48586223", "0.48515227", "0.4844113", "0.4844113", "0.4844113", "0.4844113", "0.4844113", "0.4840076", "0.48329738", "0.48296076", "0.48099452", "0.48079324", "0.4803342", "0.4803342", "0.47990084", "0.47939923", "0.4793512", "0.47920647", "0.47859207", "0.47817743", "0.47815305" ]
0.63632435
0
require_path defaults to True unless match_subdomains is enabled.
def _match_hostname(url, condition, require_path=None, require_no_path=False): scheme, _, other = url.partition(":") if scheme not in ( "git", # lxc-python2 "git+https", # asyncssh "http", "https", "svn", # wsgiref ): return False if condition.startswith("http://"): condition = condition[7:] hostname, _, path = condition.partition("/") if ":" in hostname: hostname = hostname.split(":", 1)[0] if "." not in other: # pragma: no cover return False # '/dev/' in http://www.reportlab.com/ other = other.lstrip("/") match_subdomains = hostname.startswith("*.") if match_subdomains: hostname = hostname[2:] subdomain, other = other.split(".", 1) if subdomain in ["www"]: logger.debug("url {} subdomain www".format(url)) return False if not other.startswith(hostname): return None if require_path is None: require_path = not match_subdomains # Require at least a suffix other = other[len(hostname) :] other = other.lstrip("/") if not other: if require_no_path: return True if require_path: logger.debug("url {} no path".format(url)) return False if path: if not other.startswith(path): logger.debug("url {} not path {}".format(url, path)) return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matches_path(cls, path):\n return path.startswith('/') or \\\n path.startswith('./') or \\\n path.startswith('../') or \\\n path.startswith('file://')", "def should_domain_substitute(path, relative_path, search_regex, used_dep_set, used_dip_set):\n relative_path_posix = relative_path.as_posix().lower()\n for include_pattern in DOMAIN_INCLUDE_PATTERNS:\n if PurePosixPath(relative_path_posix).match(include_pattern):\n used_dip_set.add(include_pattern)\n for exclude_prefix in DOMAIN_EXCLUDE_PREFIXES:\n if relative_path_posix.startswith(exclude_prefix):\n used_dep_set.add(exclude_prefix)\n return False\n return _check_regex_match(path, search_regex)\n return False", "def allow_glob_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_glob_domains\")", "def allow_glob_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_glob_domains\")", "def enable_sub_path(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_sub_path\")", "def allow_subdomains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_subdomains\")", "def allow_subdomains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_subdomains\")", "def try_require(path: str) -> bool:\n if can_make(path):\n require(path)\n return True\n\n return False", "def allow_glob_domains(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_glob_domains\")", "def _is_request_in_include_path(self, request):\n if self._include_paths:\n for path in self._include_paths:\n if request.path.startswith(path):\n return True\n return False\n else:\n return True", "def check_path(self, path):\n if path in self.app_path:\n return True\n else:\n return False", "def ValidatePath(self, root_path: str) -> bool:\n if 'silver' in root_path:\n return True\n\n return False", "def allow_subdomains(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_subdomains\")", "def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False", "def startswith(self, base):\n if self.path_is_string:\n return self.path.startswith(base)\n if not self.path:\n return not bool(base)\n if self.path_type is list and len(self.path) is 1:\n return self.path[0].startswith(base)\n return self.joined().startswith(base)", "def path_validate(path):\n # functionality to be added later\n return path", "def host_valid_strict(self, host: str) -> bool:\n host = host[4:] if host.startswith('www.') else 'www.' 
+ host\n return host in self.root_domains", "def should_process_request(self, request):\r\n path = request.META['PATH_INFO']\r\n\r\n ignored_url_patterns = getattr(settings, 'TRACKING_IGNORE_URL_PATTERNS', [])\r\n for pattern in ignored_url_patterns:\r\n # Note we are explicitly relying on python's internal caching of\r\n # compiled regular expressions here.\r\n if re.match(pattern, path):\r\n return False\r\n return True", "def _include_path(self, path, extensions=None):\r\n if extensions is None:\r\n extensions = tuple(self.readers.extensions)\r\n basename = os.path.basename(path)\r\n\r\n #check IGNORE_FILES\r\n ignores = self.settings['IGNORE_FILES']\r\n if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):\r\n return False\r\n\r\n if extensions is False or basename.endswith(extensions):\r\n return True\r\n return False", "def _get_full_path(self, path, environ):\n if path.startswith('//'):\n path = path[1:]\n elif path.startswith('/'):\n path = environ.get('SCRIPT_NAME', '') + path\n return path", "def does_request_match_siteprefix(self, request):\n if (self.siteurl_relative == ''):\n return True\n # check (and strip) site prefx\n return request.preprocess_siteprefix(self.siteurl_relative)", "def _has_extension(self, path):\r\n if re.match(r'.*\\\\.*\\..*$', path):\r\n return True", "def validate_short_path(short_path):", "def _is_bad_path(path, base):\r\n return not resolved(joinpath(base, path)).startswith(base)", "def validate_url(path):\n parsed = urlparse(path)\n return bool(parsed.scheme) and bool(parsed.netloc)", "def included(path):\n if path.endswith(Env.IGNORED_TEST_DIRS):\n return False\n return path.endswith('.py') or os.path.isdir(path)", "def path_has_subreddit(self):\r\n return (self.path.startswith('/r/') or\r\n self.path.startswith('/categories/'))", "def should_redirect_without_slash(self, request):\n if getattr(settings, 'REMOVE_SLASH', False) and trailing_slash_regexp.search(request.get_full_path()):\n urlconf = getattr(request, 'urlconf', None)\n return (not urlresolvers.is_valid_path(request.path_info, urlconf) and urlresolvers.is_valid_path(\n request.path_info[:-1], urlconf))\n return False", "def can_fetch(self, useragent, url):\n target_url = url\n if self.root_path:\n target_url = re.sub(self.root_path, \"\", target_url)\n return super(Robot, self).can_fetch(useragent, target_url)", "def host_valid_lenient(self, host: str) -> bool:\n return WebCrawler.resolve_domain(host) in self.root_domains", "def allow_bare_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")", "def allow_bare_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")", "def IsValidPath(path):\n path = path.lower()\n if any(path.endswith(extension) for extension in EXCLUDED_EXTENSIONS):\n return False\n\n segments = path.split('/')\n filename = segments[-1]\n if filename.startswith('.') or filename in EXCLUDED_FILENAMES:\n return False\n\n dirs = segments[:-1]\n # allow META-INF/services at the root to support ServiceLoader\n if dirs[:2] == ['meta-inf', 'services']:\n return True\n\n return not any(dir in EXCLUDED_DIRECTORIES for dir in dirs)", "def load_path_url():\n web.ctx.path_url = web.ctx.home + web.ctx.path", "def is_path_constraint_name(global_name):\n return '.path:' in global_name", "def test_append_slash_disabled_custom_urlconf(self):\n request = self.rf.get(\"/customurlconf/slash\")\n request.urlconf = \"middleware.extra_urls\"\n 
self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def matches_host(self, host: str, requires_data_uri: bool = False) -> bool:\n return (\n self.url\n and self.site_host\n and self.site_host in host\n and (self.data_uri if requires_data_uri else True)\n )", "def _add_lookup_path(path, paths):\n if path not in paths:\n if not os.path.isdir(path):\n raise ValueError('Invalid path: {}'.format(path))\n\n paths.insert(0, os.path.abspath(path))\n return True\n\n return False", "def _validate_path(self, path: str, is_file: bool) -> bool:\n is_valid_path = True\n if is_file and not os.path.isfile(path):\n is_valid_path = False\n elif not is_file and not os.path.isdir(path):\n is_valid_path = False\n if is_valid_path:\n logging.info('github_source_interceptor: Located path: ' + path)\n else:\n logging.error('github_source_interceptor: Could not locate path: ' + path)\n\n return is_valid_path", "def allowed_host_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AllowedHostPathPatchArgs']]]]:\n return pulumi.get(self, \"allowed_host_paths\")", "def is_absolute_url(path):\n return path.startswith(\"http\")", "def require_found(cls,path):\n if not os.path.exists(path):\n raise exceptions.PathNotFoundError(path)", "def path_is_base(self, path):\n\n return path is not None and len(path) == len(self.levels)", "def ensure_pathy(path):\n from pathy import Pathy # noqa: F811\n\n return Pathy.fluid(path)", "def test_base_path(original_base_path, args):\n if args.skip_redirects:\n return original_base_path\n\n # WARNING: some redirects are hardcoded to production URLs.\n # Both staging and production will rate limit us.\n response = session.head(args.root_url + original_base_path, allow_redirects=True)\n\n if 200 <= response.status_code < 300:\n return response.url.replace('https://www.gov.uk', '').replace(args.root_url, '')\n elif response.status_code == 429:\n response.raise_for_status()\n else:\n if response.status_code not in (410,):\n sys.stderr.write(\"Unexpected response {} for {}\\n\".format(response.status_code, original_base_path))\n return None", "def allowed_host_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AllowedHostPathArgs']]]]:\n return pulumi.get(self, \"allowed_host_paths\")", "def path_exists(path):\n if path.startswith('http://') or path.startswith('https://'):\n return True\n\n return isfile(path)", "def qualify(path):\n if not absoluteRegexp.search(path):\n path = os.path.join(cwd, path)\n return path", "def isPfnForProtocol( self, path ):\n if path.startswith( '/' ):\n return S_OK( True )\n else:\n return S_OK( False )", "def hasAbsPath(self, test_path):\n test_path = os.path.abspath(test_path)\n for path in self.paths:\n ap = os.path.abspath(path)\n if test_path.startswith(ap):\n return True\n\n return False", "def allow_bare_domains(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")", "def ismount(path):\n return True if not get_instance(path).relpath(path) else False", "def validated_path(basepath, env = None, *path):\n if basepath is not None:\n result = os.path.realpath(os.path.join(os.path.expanduser(basepath), *path))\n\n if env is not None and not os.path.isdir(result):\n env.warn(result + ' not found.')\n\n return result\n else:\n raise ValueError", "def is_senior_allow(self, url):\n for src_pat, dir_pat in self.senior_allow_res.iteritems():\n if src_pat.match(self._task_url) and dir_pat.match(url):\n 
return True\n return False", "def is_reddit_url(self, subreddit = None):\r\n from pylons import g\r\n return (not self.hostname or \r\n self.hostname.endswith(g.domain) or\r\n (subreddit and subreddit.domain and\r\n self.hostname.endswith(subreddit.domain)))", "def test_append_slash_have_slash_custom_urlconf(self):\n request = self.rf.get(\"/customurlconf/slash/\")\n request.urlconf = \"middleware.extra_urls\"\n self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def valid_url(self):\r\n if self.resolver:\r\n return True\r\n return False", "def first_part_is(self, key):\n if self.path_is_string:\n return self.path.startswith(str(key) + '.')\n if not self.path:\n return not bool(key)\n if self.path_type is list:\n return self.path[0] == key\n if self.path_type is Path:\n return self.path.first_part_is(key)\n return self.joined().startswith(str(key) + '.')", "def check_path(match_tuple: MatchTuple) -> bool:\n relative_path = match_tuple.link.split('#')[0]\n full_path = os.path.join(\n os.path.dirname(str(match_tuple.source)), relative_path)\n return os.path.exists(full_path)", "def _match_incl_regexp(self, rel_path):\n\n for neg_regexp in self.include_regexps:\n if neg_regexp.search(rel_path) is not None:\n self.logger.debug(\"The same path %s matches the include\"\n \" regexp %s.\" % (rel_path,\n neg_regexp.pattern))\n return True\n\n return False", "def indomain(url, domain):\n if url and domain:\n return url.startswith(domain)\n return False", "def _allow_all(root, path, pool):\n return 1", "def senior_url_pattern_filter(self, url):\n for src_pat, dir_pat in self._senior_url_pattern.iteritems():\n if src_pat.match(self._task_url) and dir_pat.match(url):\n return True\n return False", "def is_path(self, s):\n return True", "def issubpath(filename, superpath, trueifsame = True):\n filename = os.path.abspath(filename)\n superpath = os.path.abspath(superpath)\n if filename.startswith(superpath + os.sep) or (trueifsame is True and filename == superpath):\n return(True)\n else:\n return(False)", "def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')", "def _is_in_main_subdir(self, path, descendant=False):\n base_regex = r'^(talus\\/(tools|packages|components|lib))/'\n\n # anything inside that directory\n if descendant:\n regex = base_regex + \".*\"\n\n # strictly must be an immediate child of one of the\n # main subdirectories\n else:\n regex = base_regex + r'[^/]*$'\n\n main_subdir_match = re.match(regex, path)\n return main_subdir_match is not None", "def _include_directory(self, root_parts):\n # include root\n if len(root_parts) == 0:\n return True\n\n # don't include lwc tests\n if root_parts[0] == \"lwc\" and any(part.startswith(\"__\") for part in root_parts):\n return False\n\n # include everything else\n return True", "def test_append_slash_slashless_resource_custom_urlconf(self):\n\n def get_response(req):\n return HttpResponse(\"web content\")\n\n request = self.rf.get(\"/customurlconf/noslash\")\n request.urlconf = \"middleware.extra_urls\"\n self.assertIsNone(CommonMiddleware(get_response).process_request(request))\n self.assertEqual(\n CommonMiddleware(get_response)(request).content, b\"web content\"\n )", "def test_file_paths(self, site):\n \n if site.home_page.contains_any_pattern(\n ['/etc/designs/','/libs/cq/', '/libs/wcm/', '/content/dam/']\n ):\n return 1\n else:\n return 0", "def is_c4x_path(path_string):\r\n return 
StaticContent.ASSET_URL_RE.match(path_string) is not None", "def satisfyRequirements(path):\n if not hasVerb(path):\n return False\n if not hasConceptsAtTheEnds(path):\n return False\n if not isConceptDefinition(path):\n return False\n\n return True", "def validpath(self, path):\n root = self.realpath(self.root)\n path = self.realpath(path)\n if not self.root.endswith(os.sep):\n root = self.root + os.sep\n if not path.endswith(os.sep):\n path = path + os.sep\n if path[0:len(root)] == root:\n return True\n return False", "def wrapped(request):\n if request.environ.get('HTTP_HOST'):\n url = request.environ['HTTP_HOST']\n else:\n url = request.environ['SERVER_NAME']\n if url.lower() == name.lower():\n return True\n\n return False", "def referrer_allowed(referrer, referrer_acl):\n allow = False\n if referrer_acl:\n rhost = urlparse(referrer or '').hostname or 'unknown'\n for mhost in referrer_acl:\n if mhost.startswith('-'):\n mhost = mhost[1:]\n if mhost == rhost or (mhost.startswith('.') and\n rhost.endswith(mhost)):\n allow = False\n elif mhost == '*' or mhost == rhost or \\\n (mhost.startswith('.') and rhost.endswith(mhost)):\n allow = True\n return allow", "def is_relative_link(link):\n return not get_protocol(link) and re.search(r\"^\\.?/([a-z]|[A-Z]|[0-9]|\\.)+\", link)", "def __bool__(self):\n for root, products in self.rel_paths():\n if products:\n return True\n return False", "def has_default_path(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"has_default_path\")", "def autoload(self):\n\t\tpath = self.world.config[\"plugin\"][\"path\"]\n\t\tif not self.load_glob(path):\n\t\t\treturn False\n\t\tif not self.check_deps():\n\t\t\treturn False\n\t\treturn True", "def test_link_is_tracked_true_with_subdomain(self):\n self.assertTrue(link_is_tracked(\"https://foo.test.com/testurl\"))", "def test_on_same_domain(self):\n self.assertTrue(on_same_domain(\n \"https://google.com/a/b\",\n \"http://sub-domain.google.com?time=0400\"\n ))", "def _is_request_in_exclude_path(self, request):\n if self._exclude_paths:\n for path in self._exclude_paths:\n if request.path.startswith(path):\n return True\n return False\n else:\n return False", "def validate_base_domain_url(base_domain_url) -> bool:\n url_components = urlparse(base_domain_url)\n return True if all([url_components.scheme, url_components.netloc]) else False", "def IsPathInLocalPaths(self, path):\n return any(\n path in priority_group for priority_group in self._path_priority_groups)", "def _isurl(self, path):\n\n # We do this here to reduce the 'import numpy' initial import time.\n from urllib.parse import urlparse\n\n # BUG : URLs require a scheme string ('http://') to be used.\n # www.google.com will fail.\n # Should we prepend the scheme for those that don't have it and\n # test that also? Similar to the way we append .gz and test for\n # for compressed versions of files.\n\n scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)\n return bool(scheme and netloc)", "def test_not_thirdparty_rel(self):\r\n parsed_fq_url = urlparse(self.runtime.handler_url(self.block, 'handler', thirdparty=False))\r\n self.assertEqual(parsed_fq_url.scheme, '')\r\n self.assertIsNone(parsed_fq_url.hostname)", "def valid_path(file_path=None):\n import os\n\n if not file_path:\n raise Exception('No path provided')\n path = os.path.dirname(os.path.abspath(__file__))\n path += file_path\n\n def _valid_path(*args, **kwargs):\n if os.path.exists(path):\n return path\n else:\n raise Exception('Path variable present, but not a valid path. 
var: ' + path)\n\n return _valid_path()", "def _validate(self, django_requirements_path: str):\n if not os.path.exists(django_requirements_path):\n raise ValueError(\n 'File [\"{}\"] does not exist.'.format(django_requirements_path))", "def is_url_requirement(ireq):\n return bool(ireq.original_link)", "def __nonzero__(self):\n return any(self.path)", "def partial_path_match(path1, path2, kwarg_re=r'\\{.*\\}'):\n split_p1 = path1.split('/')\n split_p2 = path2.split('/')\n pat = re.compile(kwarg_re)\n if len(split_p1) != len(split_p2):\n return False\n for partial_p1, partial_p2 in zip(split_p1, split_p2):\n if pat.match(partial_p1) or pat.match(partial_p2):\n continue\n if not partial_p1 == partial_p2:\n return False\n return True", "def constrain_path_relative_to(path):\n environ_backup = os.environ\n environ = os.environ\n\n if path:\n environ = os.environ.copy()\n environ[\"PATH\"] = path\n\n os.environ = environ\n\n try:\n yield\n finally:\n os.environ = environ_backup", "def require_notfound(cls,path):\n if os.path.exists(path):\n raise exceptions.PathFoundError(path)", "def does_path_match_base(path, base):\n # Do not trust caller to have supplied a / URL path separator prefixed\n # and suffixed \"base\" asset path. as_base() will simply be a no-op for a\n # base that is already properly delimited.\n canonical = as_base(base)\n\n if path == canonical:\n # Exact match of allowed \"base\" asset path, including leading and\n # trailing / URL path separators.\n return True\n\n if (len(path) > len(canonical)) and path.startswith(canonical):\n # Asset path is prefixed by an allowed \"base\" asset path, including\n # the leading and trailing / URL path separators (with the trailing\n # base separator at the correct position in the asset path).\n return True\n\n if path == as_key(base):\n # Exact match (with no extra path length) of allowed asset base key\n # (an allowed asset base path with the leading and trailing / URL path\n # separators strippped).\n return True\n\n relative = relative_base(base)\n\n if path == relative:\n # Exact match of relative \"base\" asset path, with trailing, but not\n # leading, / URL path separator.\n return True\n\n if (len(path) > len(relative)) and path.startswith(relative):\n # Asset path must at least be longer than the relative base as\n # a path prefix (e.g. 
assets/img/), and must start with exactly\n # that prefix (including the trailing / URL path separator in the\n # correct position in the path).\n return True\n\n return False", "def is_subpath(path: Path, other: Path):\n try:\n Path(path).relative_to(other)\n except ValueError:\n return False\n else:\n return True", "def IsValidSubPath(self, command_path):\n current = self\n for part in command_path:\n current = current.LoadSubElement(part)\n if not current:\n return False\n return True", "def predicate(path):\n p = os.path.abspath(path)\n return any(p == d or p.startswith(d + os.path.sep)\n for d in directories)", "def test_append_slash_slashless_unknown_custom_urlconf(self):\n request = self.rf.get(\"/customurlconf/unknown\")\n request.urlconf = \"middleware.extra_urls\"\n self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def enable_private_path_for_google_cloud_services(self) -> bool:\n return pulumi.get(self, \"enable_private_path_for_google_cloud_services\")", "def is_valid_production_root(path: pathlib.Path) -> bool:\n if not path.is_absolute():\n return False\n if not path.exists():\n return False\n if not path.is_dir():\n return False\n config_file_path = get_production_config_file_path(path)\n return config_file_path.exists()" ]
[ "0.59390235", "0.5838241", "0.58182794", "0.58182794", "0.5769316", "0.5747707", "0.5747707", "0.563547", "0.5611369", "0.55556506", "0.55022365", "0.5498899", "0.54806775", "0.53935033", "0.5303024", "0.5217876", "0.51932895", "0.5184053", "0.51652503", "0.5151885", "0.5128705", "0.51237386", "0.5123483", "0.51138085", "0.50837123", "0.5064859", "0.50603133", "0.5017502", "0.50002253", "0.49911088", "0.4982781", "0.4982781", "0.4979749", "0.49791834", "0.49706915", "0.4967991", "0.49627835", "0.49581107", "0.49318194", "0.49314153", "0.49226046", "0.49103296", "0.49009585", "0.48940468", "0.48884568", "0.4880659", "0.48791102", "0.4878002", "0.48779896", "0.48611504", "0.48520118", "0.48310885", "0.48243004", "0.4819589", "0.4816707", "0.48033294", "0.47988743", "0.47978836", "0.4776082", "0.476273", "0.4762347", "0.4761682", "0.4746998", "0.47326058", "0.4721647", "0.4714343", "0.47115794", "0.47086003", "0.46962643", "0.4691945", "0.46889886", "0.46886307", "0.468749", "0.46828678", "0.4673116", "0.4665504", "0.46653113", "0.46640083", "0.46607003", "0.46577772", "0.46553952", "0.46429333", "0.46418807", "0.463807", "0.46273446", "0.46263072", "0.46219975", "0.4616961", "0.46159205", "0.46114415", "0.46101305", "0.46099374", "0.45943233", "0.45935956", "0.4593118", "0.4591692", "0.45874527", "0.45807847", "0.4578736", "0.45766845" ]
0.5738598
7
Helper method to create a requests Session
def get_session(): jwt_secret = base64.urlsafe_b64decode(os.getenv('AUTH0_CLIENT_SECRET')) claims = { 'sub': 'rf|airflow-user', 'iat': datetime.utcnow(), 'exp': datetime.utcnow() + timedelta(hours=3) } encoded_jwt = jwt.encode(claims, jwt_secret, algorithm='HS256') session = requests.Session() session.headers.update({'Authorization': 'Bearer {}'.format(encoded_jwt)}) return session
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_session():\n return requests.Session()", "def create(self):\n\t\tif self._session:\n\t\t\tself.close()\n\n\t\tif not self._session:\n\t\t\tself._session = requests.Session()\n\t\t\tself._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n\t\t\tself._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n\t\t\tmsg = u'Created internal requests Session instance {0:#0x}'\n\t\t\tlog_with_debug_info(logging.DEBUG, msg.format(id(self._session)))", "def create(self):\n if self._session:\n self.close()\n\n if not self._session:\n self._session = requests.Session()\n self._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n self._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n msg = u'Created internal requests Session instance {0:#0x}'\n utils.log_with_debug_info(logging.DEBUG, msg.format(id(self._session)))", "def req_session():\n request = Request()\n session = PoorSession(request.secret_key)\n session.data['test'] = True\n session.write()\n request.cookies = session.cookie\n return request", "def _create_session(self):\n self.session = requests.Session() # pragma: no cover\n self.session.headers[\"Accept\"] = \"application/json\" # pragma: no cover\n if self.user: # pragma: no cover\n self.session.auth = (self.user, self.cred) # pragma: no cover", "def create_session(\n self,\n environ: str,\n session_request_to_use: typing.Optional[SessionRequest] = None,\n ) -> Session:\n self.poll_sessions() # make sure there is an up to date picture of Sessions before proceeding\n self.check_session_can_start(session_request_to_use)\n return self.perform_session_create(\n environ, self.project.session_parameters.serialize()\n )", "def create_session(self, loop):\n session = ClientSession(loop=loop, json_serialize=json_dumps)\n # Setting directly on `session` will raise deprecation warning\n object.__setattr__(session, \"_request\", self.match_request)\n return session", "def request_session(self):\n if not hasattr(self, \"_request_session\"):\n rqsid = self.shared_vars.pop(\"rqsid\", \"\")\n rqses = self.request_session_manager.pop_request_session(rqsid)\n\n if not rqses:\n if self.is_action():\n del session['VDOM_API_SESSIONS']\n raise RequestSessionDoesntExist\n\n rqses = self.request_session_manager.create_request_session()\n\n else:\n uuid = rqses[\"rqsid_uuid\"]\n if not self.verify_request_session_key(rqsid, uuid):\n del session['VDOM_API_SESSIONS']\n raise RequestSessionInvalidKey\n\n self._request_session = rqses\n\n return self._request_session", "def session():\n def session():\n return BaseUrlSession()\n return session", "def _new_session(self):\n try:\n self._session.close()\n except (AttributeError,TypeError):\n pass\n self._session = requests.Session()\n return self._session", "def create_session(obj):\n session = requests.Session()\n if obj.user is not None and obj.password is not None:\n session.auth = (obj.user, obj.password)\n\n # Proxy setup\n if obj.proxy is not None:\n proxy = '%s://%s:%s' % (translate_proxy_scheme(obj.proxy_type),\n obj.proxy_host, obj.proxy_port)\n session.proxies = {'http': proxy, 'https': proxy}\n\n # Emulate curl's way of handling SSL\n if obj.cainfo is not None:\n # CA certificates\n session.verify = obj.cainfo\n if obj.sslcert is not None:\n # Client certificate\n session.cert = obj.sslcert\n if obj.verifypeer is not None and not obj.verifypeer:\n # Disable certificate validation\n session.verify = False\n if obj.verifyhost is not None and not 
obj.verifyhost:\n # Check the certificate, but do not verify that the hostname matches it.\n session.mount('https://', HostNameIgnoringAdapter())\n else:\n # Setup the retry strategy\n session.mount('https://', HTTPAdapter(max_retries=retries))\n # setup retry strategy for http connections\n session.mount('http://', HTTPAdapter(max_retries=retries))\n\n return session", "def get_session(*args, **kwargs):\n session = requests.session(*args, **kwargs)\n\n return session", "def _initialize_session(self):\n session = requests.Session()\n session.auth = (self.login, self.password)\n session.verify = False\n session.headers.update({'Accept': 'application/json'})\n session.headers.update({'Content-type': 'application/json'})\n return session", "def session():\n s = requests.Session()\n retries = Retry(total=5, backoff_factor=0.5)\n s.mount(\"http://\", HTTPAdapter(max_retries=retries))\n return s", "def _init_session(session):\n if session is None:\n session = requests.Session()\n return session", "def _insert_new_session():\n request = self._make_request()\n session_existing = self._set_up_session_in_Redis_and_makeOne( # noqa: F841\n request, session_id, session_dict={\"visited\": True}, **session_args\n )\n return request", "def _request(self):\n if self._session is None:\n # Lazy import, may never be called\n from requests import Session\n from requests.adapters import HTTPAdapter\n from urllib3.util.retry import Retry\n\n # Create session with automatic retries on some error codes\n adapter = HTTPAdapter(max_retries=Retry(\n total=self._RETRIES, read=self._RETRIES, connect=self._RETRIES,\n backoff_factor=0.3, status_forcelist=(408, 500, 502, 504)))\n\n self._session = Session()\n self._session.mount('http://', adapter)\n self._session.mount('https://', adapter)\n self._session_request = self._session.request\n\n return self._session_request", "async def create_session() -> aiohttp.ClientSession:\n\n headers = generate_header()\n\n client_session = aiohttp.ClientSession(headers=headers)\n return client_session", "def get_session():\n if not hasattr(get_session, \"session\"):\n get_session.session = requests_cache.CachedSession(\n cache_name=CACHE_PATH.rstrip(\".sqlite\"),\n expire_after=518400, # 6 days\n )\n adapter = HTTPAdapter(max_retries=3)\n get_session.session.mount(\"http://\", adapter)\n get_session.session.mount(\"https://\", adapter)\n return get_session.session", "def _create_session(self) -> Session:\n session = Session()\n\n # Sets the client side and server side SSL cert verification, if provided as properties.\n if ssl_config := self.properties.get(SSL):\n if ssl_ca_bundle := ssl_config.get(CA_BUNDLE): # type: ignore\n session.verify = ssl_ca_bundle\n if ssl_client := ssl_config.get(CLIENT): # type: ignore\n if all(k in ssl_client for k in (CERT, KEY)):\n session.cert = (ssl_client[CERT], ssl_client[KEY])\n elif ssl_client_cert := ssl_client.get(CERT):\n session.cert = ssl_client_cert\n\n # If we have credentials, but not a token, we want to fetch a token\n if TOKEN not in self.properties and CREDENTIAL in self.properties:\n self.properties[TOKEN] = self._fetch_access_token(session, self.properties[CREDENTIAL])\n\n # Set Auth token for subsequent calls in the session\n if token := self.properties.get(TOKEN):\n session.headers[AUTHORIZATION_HEADER] = f\"{BEARER_PREFIX} {token}\"\n\n # Set HTTP headers\n session.headers[\"Content-type\"] = \"application/json\"\n session.headers[\"X-Client-Version\"] = ICEBERG_REST_SPEC_VERSION\n session.headers[\"User-Agent\"] = 
f\"PyIceberg/{__version__}\"\n\n # Configure SigV4 Request Signing\n if str(self.properties.get(SIGV4, False)).lower() == \"true\":\n self._init_sigv4(session)\n\n return session", "def perform_session_create(self, environ: str, session_parameters: dict) -> Session:\n session_parameters[\"mounts\"] = []\n attach_context = self.client.start_session(environ, session_parameters)\n\n # TODO should we record some of the request\n # headers e.g. `REMOTE_ADDR`, `HTTP_USER_AGENT`, `HTTP_REFERER` for analytics?\n\n return Session.objects.create(\n project=self.project,\n url=attach_context.url,\n execution_id=attach_context.execution_id,\n client_class_id=self.client.class_id,\n )", "def get_session():\n request_session = requests.Session()\n\n # Try to use what was passed in for username/password...\n username = CMD.username\n password = CMD.password\n \n # ...if there was nothing passed in then try to read it from config file\n if ((username is None or username == \"\") and (password is None or password == \"\")):\n # Try to read username and password from config file, if it exists\n # Otherwise default to DEFAULT_USERNAME/DEFAULT_PASSWORD\n try:\n with open(\"config.json\") as config_file:\n config_data = json.load(config_file)\n if (config_data):\n username = config_data[\"username\"]\n password = config_data[\"password\"]\n except:\n LOG.exception(\"Unable to open \\\"/collector/config.json\\\" file\")\n username = DEFAULT_USERNAME\n password = DEFAULT_PASSWORD\n\n request_session.auth = (username, password)\n request_session.headers = {\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"netapp-client-type\": \"grafana-\" + __version__}\n # Ignore the self-signed certificate issues for https\n request_session.verify = False\n return request_session", "def _create_login_session(self):\r\n sess = requests.Session()\r\n r = sess.get(self.page(self.LOGIN_PAGE), verify=self.verify)\r\n if r.status_code == 200:\r\n csrf_token = EndaceWebSession.find_csrf_token_login(r.content)\r\n if csrf_token is None:\r\n raise Exception(\"Could not find CSRF token\")\r\n # Submit login form\r\n login_result = sess.post(self.page(self.LOGIN_ACTION),\r\n data={\r\n \"_csrf\": csrf_token,\r\n \"d_user_id\": \"user_id\",\r\n \"t_user_id\": \"string\",\r\n \"c_user_id\": \"string\",\r\n \"e_user_id\": \"true\",\r\n \"f_user_id\": str(self.username),\r\n \"f_password\": str(self.password),\r\n \"Login\": \"Login\"},\r\n headers={'Content-type': 'application/x-www-form-urlencoded'}\r\n )\r\n if login_result.status_code == 200 and len(sess.cookies) > 0:\r\n return sess\r\n else:\r\n raise Exception(\"Login failed\")\r\n else:\r\n raise Exception(\"Login failed\")", "def create_session(self, **params):\n raise NotImplementedError('Should be implemented by a sub-class.')", "def _create_normal_request(self, url):\r\n request = self.factory.get(url)\r\n request.user = AnonymousUser()\r\n middleware = SessionMiddleware()\r\n middleware.process_request(request)\r\n request.session.save()\r\n MakoMiddleware().process_request(request)\r\n return request", "def _create_redash_session():\n session = requests.Session()\n session.headers.update({'Authorization': 'Key {}'.format(API_KEY)})\n return session", "def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()", "def create_session(self, transport):\n session = self.SESSION_CLS(self, transport, 0)\n self.session = session\n return session", "def session(self, request: HttpRequest) -> Job:\n job = 
Job.objects.create(\n project=self,\n creator=request.user if request.user.is_authenticated else None,\n method=JobMethod.session.name,\n params=dict(container_image=self.container_image),\n description=f\"Session for project '{self.name}'\",\n )\n job.add_user(request)\n return job", "def get_session() -> requests.Session:\n return _get_session_from_cache(thread_ident=threading.get_ident())", "def _create_nb_session(self):\n header = {\"Authorization\": \"Token {}\".format(settings.NB_API_KEY)}\n session = requests.Session()\n session.headers.update(header)\n self.nb_session = session\n log.info(\"Created new HTTP Session for NetBox.\")\n return session", "def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id)\n self.sessions[session.id] = session\n return session", "async def session(self, request):\n body = await api_validate(SCHEMA_SESSION, request)\n self._check_password(body)\n\n # check TOTP\n if self.config.security_totp:\n totp = pyotp.TOTP(self.config.security_totp)\n if body[ATTR_TOTP] != totp.now():\n raise RuntimeError(\"Invalid TOTP token!\")\n\n # create session\n valid_until = datetime.now() + timedelta(days=1)\n session = hashlib.sha256(os.urandom(54)).hexdigest()\n\n # store session\n self.config.add_security_session(session, valid_until)\n return {ATTR_SESSION: session}", "def _create_ssl_request(self, url):\r\n request = self.factory.get(url)\r\n request.META['SSL_CLIENT_S_DN'] = self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)\r\n request.user = AnonymousUser()\r\n middleware = SessionMiddleware()\r\n middleware.process_request(request)\r\n request.session.save()\r\n MakoMiddleware().process_request(request)\r\n return request", "def _get_session_from_cache(thread_ident: int) -> requests.Session:\n return _GLOBAL_BACKEND_FACTORY()", "def make_new_session():\n session = Session.objects.create(uuid=str(uuid4()), container_id=None)\n return session.id", "def session(request):\n session = get_test_db_session()\n request.cls.session = session\n return session", "def __init__(self):\n\n self._session = requests.Session()", "def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session", "def create_mock_session(self, base_url):\n logger.debug(\"robotframework-wiremock libary version: {}\".format(__version__))\n self.base_url = base_url\n self.session = requests.Session()", "def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id, self.message_mgr)\n self.sessions[session.id] = session\n return session", "def create_session(self, transport):\n session = self.SESSION_CLS(self, transport, 0, self.message_mgr)\n self.session = session\n return session", "def make_session():\n import aiohttp\n conn = aiohttp.TCPConnector(limit_per_host=int(\n os.getenv('AIO_CONN_LIMIT', 10)))\n timeout = aiohttp.ClientTimeout(\n total=int(os.getenv('AIO_TOTAL_TIMEOUT', 80)),\n connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n sock_read=int(os.getenv('AOI_READ_TIMEOUT', 30)),\n sock_connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n )\n s = aiohttp.ClientSession(connector=conn, timeout=timeout)\n return s", "async def create_session(self):\n # Creating a session under an async function is recommended\n self.session = aiohttp.ClientSession()", "def session(get_session):\n return get_session()", "def init_session(self):\n self._session = 
requests.Session()", "def initiate_session():\n \n session_requests = requests.session()\n login_website(session_requests)\n print(\"Login Completed\")\n \n return session_requests", "def _getHttpSession(self):\n\n if self.httpSession is None:\n self.httpSession = requests.Session()\n return self.httpSession", "def _new_session(self, username_key=None, **attributes):\n for key in ['username', 'token', 'tenant_id']:\n if attributes.get(key, None) is None:\n attributes[key] = key + \"_\" + text_type(uuid4())\n if 'expires' not in attributes:\n attributes['expires'] = (\n datetime.utcfromtimestamp(self._clock.seconds())\n + timedelta(days=1)\n )\n session = Session(**attributes)\n if username_key is None:\n username_key = session.username\n self._username_to_token[username_key] = session.token\n self._token_to_session[session.token] = session\n self._tenant_to_token[session.tenant_id] = session.token\n return session", "def init_session():\n\n session = Session()\n\n # headers\n session.headers = {\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"pt-PT,pt;q=0.8,en-GB;q=0.6,en;q=0.4,en-US;q=0.2\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/javascript, application/javascript, */*\",\n \"Referer\": \"https://sigrhe.dgae.mec.pt/openerp/menu?active=474&tzoffset=-60\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Connection\": \"keep-alive\",\n \"DNT\": \"1\",\n \"Host\": \"sigrhe.dgae.mec.pt\",\n \"Origin\": \"https://sigrhe.dgae.mec.pt\",\n }\n\n return session", "def new_req_session_bad():\n saEngine = sqlalchemy.create_engine(\"sqlite://\", echo=False)\n saSessionmaker = sqlalchemy.orm.sessionmaker(bind=saEngine)\n saSession = saSessionmaker()\n # oauth1_model.initialize(saEngine, saSession)\n req = FakeRequest()\n req.dbSession = saSession\n return req", "def dummy_request(new_session, method=\"GET\"):\n request = testing.DummyRequest()\n request.method = method\n request.dbsession = new_session\n return request", "def getSession(self):\n if self.accessToken is None:\n self.authenticate()\n\n s = requests.Session()\n s.auth = self.getAuthObj()\n s.headers = {\"Accept\": \"application/json\"}\n return s", "def _get_session():\n api_version = \"1.0\"\n originator = \"salt_cloud_{}_driver\".format(__virtualname__)\n url = config.get_cloud_config_value(\n \"url\", get_configured_provider(), __opts__, search_global=False\n )\n user = config.get_cloud_config_value(\n \"user\", get_configured_provider(), __opts__, search_global=False\n )\n password = config.get_cloud_config_value(\n \"password\", get_configured_provider(), __opts__, search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n \"ignore_ssl\",\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False,\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n \"url: %s user: %s password: %s, originator: %s\",\n url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = str(ex.__dict__[\"details\"][1])\n slash_parts = url.split(\"/\")\n new_url = \"/\".join(slash_parts[:2]) + \"/\" + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n \"session is -> url: %s user: %s password: %s, originator:%s\",\n new_url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n 
session.xenapi.login_with_password(user, password, api_version, originator)\n return session", "def initsession():\n global stringSearch_component\n global year_component\n global semester_component\n\n sess = requests.Session()\n soup_jar = {'hello':'hello'}\n sess.headers = SESSION_HEADERS\n sess.cookies.update({'sap-usercontext': 'sap-client=700'})\n\n res_init = sess.get(SOURCE_URL)\n soup_jar['init'] = BeautifulSoup(res_init.text, 'html.parser')\n\n form = soup_jar['init'].find('form', \n {'name': 'sap.client.SsrClient.form'})\n action = form.get('action')\n res_base = sess.post(HOST_URL + action)\n\n soup_jar['base'] = BeautifulSoup(res_base.text, 'lxml')\n\n sapid = get_sap_wd_secure_id(soup_jar['base'])\n contextid = get_sap_contextid(soup_jar['base'])\n stringSearchClass_component = \\\n get_string_search_class_component(soup_jar['base'])\n stringSearch_component = \\\n get_string_search_component(soup_jar['base']) \n year_component = get_year_component(soup_jar['base'])\n semester_component = get_semester_component(soup_jar['base'])\n\n return sapid, contextid", "def new_session(self):\n return self.Session()", "def get_session_factory(self, options):", "def create(id = None, expires=None):\n\n\t# Init the data\n\tdData = {}\n\n\t# If we have an expires time\n\tif expires:\n\t\tdData['__expire'] = expires\n\n\t# Create a new Session using a UUID as the id\n\treturn _Session(id and id or uuid.uuid4().hex, dData)", "def session(self):", "def __init__(self, req):\n #pass the request in making in so we can edit it later if requested (ACL for example)\n self.ip = req.connection.remote_ip\n c = Cookie.get_cookies(req)\n if not c.has_key('mps'):\n self.sessid = Uid().new_sid(req)\n else:\n c = c['mps']\n self.sessid = c.value\n \n #make new cookie so the cycle continues\n c = Cookie.Cookie('mps', self.sessid)\n c.path = '/'\n Cookie.add_cookie(req, c)\n \n self.session_path = \"%s%s\"%(path_to_sessions, self.sessid)\n self.full_session_path = \"%s%s\"%(self.session_path, db_extension)\n \n #use previous authenication until cookie is reevaluated, if they are officially logged in (in Instance)\n if os.path.exists(self.full_session_path):\n session = shelve.open(self.session_path, 'rw')\n self.user = session['USER_']\n session.close()\n else:\n self.user = self.unauthorized", "def __init__(self, url, username, password):\n self.session = requests.session()\n self.session.auth = (username, password)\n self.session.headers.update({\n 'Accept': JSON_CONTENT_TYPE,\n })\n self.url = url", "def _createSessionObject(self, request):\n # Preload necessary data items\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = user.email()\n # Get the conference entity\n conf = _getEntityByWebsafeKey(request.websafeConferenceKey,\n 'Conference')\n # Ensure that the current user is the conference organizer\n if user_id != conf.organizerUserId:\n raise endpoints.UnauthorizedException(\n 'Only the conference organizer can create a new session')\n # Verify that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Ensure that the user submitted the required name property\n if not request.name:\n raise endpoints.BadRequestException(\n \"Session 'name' field required\")\n # Copy SessionForm/ProtoRPC Message into dict\n data = {\n field.name: getattr(request, field.name) for field in\n request.all_fields()\n }\n # Remove data that isn't destined for the Session entity\n del 
data['websafeConferenceKey']\n del data['websafeSpeakerKey']\n del data['websafeKey']\n # Add default values for those missing in the data model\n for df in SESSION_DEFAULTS:\n if data[df] in (None, []):\n data[df] = SESSION_DEFAULTS[df]\n # Ensure the string version of typeOfSession is what is stored\n # in the NDB model\n data['typeOfSession'] = str(data['typeOfSession'])\n # Convert date from string to Date object\n if data['date'] is not None:\n try:\n data['date'] = datetime.strptime(\n data['date'][:10], '%Y-%m-%d').date()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'date' value\")\n # Convert startTime from string to Time object\n if data['startTime'] is not None:\n try:\n data['startTime'] = datetime.strptime(\n data['startTime'], '%H:%M').time()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'startTime' value\")\n # Create Session\n session = Session(**data)\n session.conference = conf.key\n session.speaker = speaker.key\n session.put()\n # Add the session key to the speaker's sessions list\n speaker.sessions.append(session.key)\n speaker.put()\n # Add a task to task queue which checks if the speaker of this session\n # should be the new featured speaker\n taskqueue.add(params={'websafeSpeakerKey': request.websafeSpeakerKey,\n 'websafeConferenceKey': request.websafeConferenceKey},\n url='/tasks/update_featured_speaker'\n )\n # Return SessionForm object\n return self._copySessionToForm(session)", "def _create_user_session(url: str, netid: str = 'superuser', new: bool = False, add_to_os: bool = True):\n\n # Create requests session\n session = requests.session()\n\n if new:\n netid = create_user(netid, add_to_os)\n\n session.get(url + f'/admin/auth/token/{netid}')\n r = session.get(url + \"/public/auth/whoami\")\n\n try:\n assert r.status_code == 200\n data = r.json()\n assert data[\"success\"] is True\n assert data[\"data\"] is not None\n assert data[\"error\"] is None\n data = copy.deepcopy(data)\n admin_for = data['data']['user']['admin_for']\n for i in admin_for:\n if i['name'] == 'Intro to OS':\n session.cookies['course'] = base64.urlsafe_b64encode(json.dumps(i).encode()).decode()\n except AssertionError as e:\n print_full_error(e, r)\n return session, netid", "def dummy_request(new_session):\n return testing.DummyRequest(dbsession=new_session)", "def test_client_custom_session():\n c_session = requests.Session()\n client = ConfigureClients(custom_session=c_session)\n assert client.session == c_session", "def create_new_session(self, username):\n return self.session_mgr.create_new_session(username)", "def __init__(self, session):\n self._session = session", "def create_session(self, session_id=None):\n\n # create random id when necessary, seems to be 1 case wanted, based on legacy code\n # creating a value so high, typical client side generation schemes hopefully wont collide\n if not session_id:\n session_id = next(\n session_id for session_id in xrange(60000, 65000)\n if session_id not in self.sessions\n )\n\n # create and add session to local manager\n session = Session(session_id, config=self.config)\n self.add_session(session)\n\n # add shutdown handler to remove session from manager\n session.shutdown_handlers.append(self.session_shutdown)\n\n return session", "def __init__(self, username, password, **kwargs):\n timeout = kwargs.pop('timeout', 30)\n token_auth = kwargs.pop('token', None)\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n requests_version = requests.__version__\n if StrictVersion(requests_version) < 
'2.9.1':\n requests.packages.urllib3.disable_warnings()\n\n # Compose with a Session obj\n self.session = requests.Session()\n\n # Configure with passed parameters\n self.session.timeout = timeout\n\n # Handle token-based auth.\n if token_auth is True:\n self.session.auth = iControlRESTTokenAuth(username, password)\n elif token_auth: # Truthy but not true: non-default loginAuthProvider\n self.session.auth = iControlRESTTokenAuth(username,\n password,\n token_auth)\n else:\n self.session.auth = (username, password)\n\n # Set state as indicated by ancestral code.\n self.session.verify = False # XXXmake TOFU\n self.session.headers.update({'Content-Type': 'application/json'})", "def test_set_session():", "def __init__(self, session):\n self.session = session", "def session(self):\n if not self._session: #Create new session if none exists\n return self._new_session()\n return self._session", "def test_new_session(self):\r\n cookie = Cookie()\r\n req = Mock(incookie=Cookie(), outcookie=cookie, authname='anonymous',\r\n base_path='/')\r\n session = Session(self.env, req)\r\n self.assertEqual(session.sid, cookie['trac_session'].value)\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT COUNT(*) FROM session\")\r\n self.assertEqual(0, cursor.fetchone()[0])", "def __init__(self, *args, **kwargs):\n self.session = requests.Session()\n access_token = get_process_execution_user_token()\n self.session.headers[\"authorization\"] = f\"Bearer {access_token}\"\n self.session.headers[\"content-type\"] = \"application/json\"", "def get_test_request(url='/', data=None, method='get', session_data=None,\n user=None):\n method_to_call = getattr(RequestFactory(), method)\n if data is None:\n data = {}\n if session_data is None:\n session_data = {}\n if user is None:\n user = AnonymousUser()\n request = method_to_call(url, data)\n request.session = session_data\n request.user = user\n return request", "def session(self):\n if not hasattr(self, '_session'):\n self._session = FakeSession(self.version)\n self._session.auth = (self.key, 'ignore')\n return self._session", "def create_session(self, session_expiration_datetime=None):\n session_expiration_datetime = session_expiration_datetime or datetime.now() + timedelta(seconds=5)\n session = JOHN | dict(session_id=\"5\", session_expiration_datetime=session_expiration_datetime)\n self.database.sessions.find_one.return_value = session", "def test_new_session_promotion(self):\r\n cursor = self.db.cursor()\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "def create_session(self):\n\t\ttry:\n\t\t\tself.session = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)\n\t\texcept Exception, e:\n\t\t\tlogger.error('Exception at create_session')\n\t\t\tlogger.debug('*' + sys.exc_info()[0])", "def getSession():\n return call(\"getSession\")", "def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\": None,\n },\n \"queue\" : [],\n \"queue_lock\" : False,\n \"current_track\" : \"\",\n \"previous_track\" : \"\",\n \"USERS\" : 
{}\n }", "def generate_temp_session(self):\n return OAuth1Session(client_key=self.public_key,\n client_secret=self.private_key,\n resource_owner_key=self.token,\n resource_owner_secret=self.token_secret)", "def create_new_session(self) -> None:\n try:\n session = self.client.create_session()\n logger.info(\"created session: %s\", session.id)\n self.join_session(session.id)\n location_config = self.app.guiconfig.location\n self.session.location = SessionLocation(\n x=location_config.x,\n y=location_config.y,\n z=location_config.z,\n lat=location_config.lat,\n lon=location_config.lon,\n alt=location_config.alt,\n scale=location_config.scale,\n )\n except grpc.RpcError as e:\n self.app.show_grpc_exception(\"New Session Error\", e)", "def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()", "def create_session(hostname, username, password):\n return slycat.web.server.remote.create_session(hostname, username, password, None)", "def get_session(self):\n session = Session(self.settings)\n self.sessions.append(session)\n return session", "def get_session():\n assert config.AUTH_URL, \"Environment variable OS_AUTH_URL is not defined\"\n\n def _get_session(auth_url=None,\n username=None,\n password=None,\n project_name=None,\n user_domain_name=None,\n project_domain_name=None):\n auth_url = auth_url or config.AUTH_URL\n username = username or config.USERNAME\n password = password or config.PASSWORD\n project_name = project_name or config.PROJECT_NAME\n user_domain_name = user_domain_name or config.USER_DOMAIN_NAME\n project_domain_name = project_domain_name or config.PROJECT_DOMAIN_NAME\n\n if config.KEYSTONE_API_VERSION == 3:\n\n auth = identity.v3.Password(\n auth_url=auth_url,\n username=username,\n user_domain_name=user_domain_name,\n password=password,\n project_name=project_name,\n project_domain_name=project_domain_name)\n\n elif config.KEYSTONE_API_VERSION == 2:\n\n auth = identity.v2.Password(\n auth_url=auth_url,\n username=username,\n password=password,\n tenant_name=project_name)\n\n else:\n raise ValueError(\"Unexpected keystone API version: {}\".format(\n config.KEYSTONE_API_VERSION))\n\n return _session.Session(auth=auth)\n\n return _get_session", "def __init__(self, url, session):\n self._url = url\n self._session = session", "def _fetch_herd_session():\n session = requests.Session()\n session.auth = (ADMIN_USERNAME, ADMIN_PASS)\n session.headers.update(HERD_HEADERS)\n\n return session", "def get_session():\n session = scoped_session(sessionmaker(bind=engine))\n return session", "def create_session(credentials):\n if type(credentials) == dict:\n pass\n elif type(credentials) == str:\n credentials = json.loads(credentials)\n else:\n credentials = json.load(credentials)\n\n session = Session(aws_access_key_id = credentials[\"aws_access_key\"],\n aws_secret_access_key = credentials[\"aws_secret_key\"],\n region_name = credentials.get('aws_region', const.REGION))\n return session", "def make_session(uri=None, echo=None, session_kwargs=None, **kwargs):\n if session_kwargs is None:\n session_kwargs = {}\n engine = create_engine(uri, echo=echo, **kwargs)\n log.debug(\"Created engine for session context\")\n return sqlalchemy.create_session(bind_to=engine, **session_kwargs)", "def _create_session(self, xnat_login, subject_id, visit_id):\n uri = ('/data/archive/projects/{}/subjects/{}/experiments/{}'\n .format(self.inputs.project_id, subject_id, visit_id))\n query = {'xsiType': 'xnat:mrSessionData', 'label': visit_id,\n 'req_format': 'qa'}\n response = 
xnat_login.put(uri, query=query)\n if response.status_code not in (200, 201):\n raise NiAnalysisError(\n \"Could not create session '{}' in subject '{}' in project '{}'\"\n \" response code {}\"\n .format(visit_id, subject_id, self.inputs.project_id,\n response))\n return xnat_login.classes.MrSessionData(uri=uri,\n xnat_session=xnat_login)", "def test_ctor_no_cookie(self):\n request = self._make_request()\n session = self._makeOne(request)\n session_dict = session.managed_dict\n self.assertDictEqual(session_dict, {})\n self.assertIs(session.new, True)", "def create_session(self, location=None, media_mode=MediaModes.relayed, archive_mode=ArchiveModes.manual):\n\n # build options\n options = {}\n if not isinstance(media_mode, MediaModes):\n raise OpenTokException(u('Cannot create session, {0} is not a valid media mode').format(media_mode))\n if not isinstance(archive_mode, ArchiveModes):\n raise OpenTokException(u('Cannot create session, {0} is not a valid archive mode').format(archive_mode))\n if archive_mode == ArchiveModes.always and media_mode != MediaModes.routed:\n raise OpenTokException(u('A session with always archive mode must also have the routed media mode.'))\n options[u('p2p.preference')] = media_mode.value\n options[u('archiveMode')] = archive_mode.value\n if location:\n # validate IP address\n try:\n inet_aton(location)\n except:\n raise OpenTokException(u('Cannot create session. Location must be either None or a valid IPv4 address {0}').format(location))\n options[u('location')] = location\n\n try:\n response = requests.post(self.endpoints.session_url(), data=options, headers=self.headers(), proxies=self.proxies, timeout=self.timeout)\n response.encoding = 'utf-8'\n\n if response.status_code == 403:\n raise AuthError('Failed to create session, invalid credentials')\n if not response.content:\n raise RequestError()\n dom = xmldom.parseString(response.content)\n except Exception as e:\n raise RequestError('Failed to create session: %s' % str(e))\n\n try:\n error = dom.getElementsByTagName('error')\n if error:\n error = error[0]\n raise AuthError('Failed to create session (code=%s): %s' % (error.attributes['code'].value, error.firstChild.attributes['message'].value))\n\n session_id = dom.getElementsByTagName('session_id')[0].childNodes[0].nodeValue\n return Session(self, session_id, location=location, media_mode=media_mode, archive_mode=archive_mode)\n except Exception as e:\n raise OpenTokException('Failed to generate session: %s' % str(e))", "def new_session(self):\n return self._SessionLocal()", "def init_session(self):\n pass", "def init_session(self):\n pass", "def init_session(refresh_token, base_search_url):\n post_body = {\"refresh_token\": refresh_token}\n response = requests.post(\n \"{}/tokens/\".format(base_search_url), json=post_body)\n response.raise_for_status()\n token = response.json()[\"id_token\"]\n session = requests.Session()\n session.headers.update({\"Authorization\": \"Bearer {}\".format(token)})\n return session", "def http_session(cookies=None):\n session = requests.Session()\n if cookies is not False:\n session.cookies.update(cookies or cookiejar())\n session.headers.update({'User-Agent': 'ipsv/{v}'.format(v=ips_vagrant.__version__)})\n\n return session" ]
[ "0.8304606", "0.7970149", "0.7811028", "0.7757718", "0.75642025", "0.7562853", "0.7443917", "0.738472", "0.7361329", "0.7354772", "0.73463327", "0.73270935", "0.7302046", "0.7212053", "0.7189299", "0.70787716", "0.70699424", "0.7052273", "0.70521116", "0.7011763", "0.69860953", "0.69781035", "0.69706", "0.6930477", "0.6920755", "0.6909619", "0.68467367", "0.68407047", "0.6824291", "0.6822142", "0.68209755", "0.6762547", "0.6752156", "0.67340386", "0.6733581", "0.6716866", "0.67130345", "0.66897076", "0.66801065", "0.6662875", "0.6661402", "0.66419953", "0.6640459", "0.6631177", "0.66182035", "0.661484", "0.6611943", "0.65979606", "0.6580247", "0.6546193", "0.6500699", "0.6500249", "0.6494444", "0.64592475", "0.64321965", "0.6417584", "0.63952416", "0.6393319", "0.6382984", "0.63745207", "0.63596076", "0.6353217", "0.63085896", "0.6308133", "0.6304234", "0.62816185", "0.62776095", "0.6253607", "0.6244943", "0.62339276", "0.6230731", "0.61963004", "0.6159196", "0.615896", "0.6156086", "0.6144107", "0.6142642", "0.6138216", "0.6131194", "0.6117949", "0.6105551", "0.6100596", "0.60945827", "0.60815394", "0.60722804", "0.606275", "0.6055206", "0.60509115", "0.6048776", "0.6032107", "0.6031452", "0.6030195", "0.60226136", "0.60223913", "0.6015646", "0.6013848", "0.60120463", "0.60120463", "0.600842", "0.5995839" ]
0.67009187
37
Load and return the vowel training dataset. Returns (X_train, y_train, X_test, y_test): a tuple of data and target.
def load_vowel():
    train = _load_vowel_train()
    test = _load_vowel_test()
    return (train[0], train[1].reshape(-1, 1), test[0], test[1].reshape(-1, 1))
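A minimal usage sketch for the load_vowel document above. The _VOWEL_TRAIN_PATH / _VOWEL_TEST_PATH constants and the shared _load_vowel_split helper are assumptions, modeled on the _load_vowel_test negative listed further below; the record itself does not include the train-side loader.

import numpy as np

# Hypothetical file paths; the vowel CSV files are not part of this record.
_VOWEL_TRAIN_PATH = "vowel_train.csv"
_VOWEL_TEST_PATH = "vowel_test.csv"

def _load_vowel_split(path):
    # Assumed column layout, mirroring the _load_vowel_test negative below:
    # the last 10 columns are features, column 1 holds the integer class label.
    vowel_data = np.loadtxt(path, delimiter=',', skiprows=1)
    X = vowel_data[:, -10:]
    y = vowel_data[:, 1].astype(int)
    return X, y

def load_vowel():
    X_train, y_train = _load_vowel_split(_VOWEL_TRAIN_PATH)
    X_test, y_test = _load_vowel_split(_VOWEL_TEST_PATH)
    return (X_train, y_train.reshape(-1, 1), X_test, y_test.reshape(-1, 1))

X_train, y_train, X_test, y_test = load_vowel()
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)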
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_vowel_test():\n vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1)\n X = vowel_data[:, -10:]\n y = vowel_data[:, 1].astype(int)\n return (X, y)", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def get_x_y() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n logger.log('Loading Dataset...')\n x_train, y_train = helpers.datasets.load_voice()\n logger.log(str(len(y_train)) + ' train data loaded')\n\n x_test, y_test = None, None\n # x_test, y_test = helpers.datasets.load_voice(train=False)\n # logger.log(str(len(y_test)) + ' test data loaded')\n\n return x_train, y_train, x_test, y_test", "def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def convert_data_to_examples(train, test, data_column, label_column):\r\n train_InputExamples = train.apply(lambda x: InputExample(guid=None,\r\n text_a = x[data_column],\r\n text_b = None,\r\n label = x[label_column]), axis = 1)\r\n\r\n\r\n validation_InputExamples = test.apply(lambda x: InputExample(guid=None,\r\n text_a = x[data_column],\r\n text_b = None,\r\n label = x[label_column]), axis = 1)\r\n\r\n return train_InputExamples, validation_InputExamples", "def load_data():\n # 
Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "def load_data():\n # Load and preprocess data\n x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev = load_data_and_labels_without_shuffled()\n\n x_text_train1 = split_sentence(x_text_train1)\n x_text_train2 = split_sentence(x_text_train2)\n x_text_dev1 = split_sentence(x_text_dev1)\n x_text_dev2 = split_sentence(x_text_dev2)\n\n x_text_train1 = pad_sentences(x_text_train1)\n x_text_train2 = pad_sentences(x_text_train2)\n x_text_dev1 = pad_sentences(x_text_dev1)\n x_text_dev2 = pad_sentences(x_text_dev2)\n\n # sentences = x_text_train1 + x_text_train2 + x_text_dev1 + x_text_dev2\n # vocabulary, vocabulary_inv = build_vocab(sentences)\n # x_text_train1 = build_input_data(x_text_train1, vocabulary)\n # x_text_train2 = build_input_data(x_text_train2, vocabulary)\n # x_text_dev1 = build_input_data(x_text_dev1, vocabulary)\n # x_text_dev2 = build_input_data(x_text_dev2, vocabulary)\n\n x_train1 = sentence_word2vec(x_text_train1)\n x_train2 = sentence_word2vec(x_text_train2)\n x_dev1 = sentence_word2vec(x_text_dev1)\n x_dev2 = sentence_word2vec(x_text_dev2)\n\n y_train = np.array(y_train)\n y_dev = np.array(y_dev)\n # return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev, vocabulary, vocabulary_inv]\n\n return [x_train1, x_train2, x_dev1, x_dev2, y_train, y_dev]", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 28),\n testy)", "def load_data(self) -> tuple:\n self.read_path = Path(os.environ[\"DATA_PATH\"]) / \"characters\"\n self.pretrain_path = Path(os.environ[\"FONT_DATA\"]) / \"training\"\n self.dataset_builder.build_data_set()\n X_pretrain, y_pretrain, X_train, y_train, X_dev, y_dev, X_test, y_test = tuple(\n [] for l in range(8)\n )\n\n for letter in self.hebrew.letter_li:\n pretrain_images = glob(f\"{Path(self.pretrain_path/letter)}/*.jpeg\")\n train_images = glob(f'{Path(self.read_path/\"train\"/letter)}/*.jpg')\n dev_images = glob(f'{Path(self.read_path/\"dev\"/letter)}/*.jpg')\n test_images = glob(f'{Path(self.read_path/\"test\"/letter)}/*.jpg')\n\n # pretrain data\n for img in pretrain_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_pretrain.append(image)\n y_pretrain.append(self.hebrew.letter_li.index(letter))\n\n # training data\n for img in train_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_train.append(image)\n y_train.append(self.hebrew.letter_li.index(letter))\n\n # dev data\n for img in dev_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_dev.append(image)\n y_dev.append(self.hebrew.letter_li.index(letter))\n\n # test data\n for img in test_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_test.append(image)\n y_test.append(self.hebrew.letter_li.index(letter))\n\n return (\n np.array(X_pretrain),\n np.array(y_pretrain),\n np.array(X_train),\n 
np.array(y_train),\n np.array(X_dev),\n np.array(y_dev),\n np.array(X_test),\n np.array(y_test),\n )", "def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def load_dataset_train():\n df_train = load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "def _get_training_data(self) -> tuple:\n\n training_data = self._data.loc[self._data.target == 'train'].drop('target', axis=1)\n y = training_data.y_label.to_numpy()\n X = training_data.drop('y_label', axis=1).to_numpy()\n\n return X, y", "def load_train_data():\r\n X_train = np.load('data/train/X_train.npy')\r\n scaling_train = np.load('data/train/scaling_train.npy')\r\n ids_train = np.load('data/train/ids_train.npy')\r\n y_train = np.load('data/train/y_train.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_train)\r\n np.random.seed(seed)\r\n 
np.random.shuffle(scaling_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_train)\r\n\r\n return X_train, scaling_train, ids_train, y_train", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test", "def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y", "def learn_vowels(self, data=None):\n #pdb.set_trace()\n if not data:\n data = self.memory\n # find acoustic prototypes by clustering over stored acoustic reps\n raw_data = data.reshape(4 * len(self.stems), 2)\n ac_vowels, ac_spread = vq.kmeans(raw_data, 4)\n # find articulatory reps by comparing synthesized output vowels to\n # acoustic prototypes\n # start with candidate list of \"all possible\" articulations\n tmp_ar = N.empty((1, 3))\n rd = 0.0\n for hi in [0.0, 1.0]:\n for bk in [0.0, 1.0]:\n tmp_ar = N.vstack((tmp_ar, N.array([hi, bk, rd])))\n tmp_ar = tmp_ar[1:]\n while len(self.vowel_map) < 4:\n # no noise (since this shouldn't be running through the \"mouth\")\n tmp_ac = self.perceive(self.acoustify(tmp_ar))\n for v in ac_vowels:\n dists = N.sqrt(N.sum((v - tmp_ac)**2, axis=1))\n d = 0\n while True:\n if dists[d] < (2 * ac_spread):\n # found an articulatory prototype\n self.vowel_map[tuple(v)] = tmp_ar[d]\n # remove it from the candidate list\n tmp_ar = N.vstack((tmp_ar[:d], tmp_ar[d + 1:]))\n tmp_ac = N.vstack((tmp_ac[:d], tmp_ac[d + 1:]))\n break\n d += 1\n if d == len(dists):\n # take the best of the bad ones\n index = N.argmin(dists)\n self.vowel_map[tuple(v)] = tmp_ar[index]\n break\n self.vowel_spread = ac_spread\n return self.vowel_map", "def learn(self, Xtrain, ytrain):", "def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), 
axis=1)\n\n scaler = StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset", "def load_MNIST(filename_train='train.csv', filename_test='test.csv'):\n # Load training data\n reader = csv.reader(open(filename_train,\"rb\"), delimiter=',')\n reader.next()\n x = list(reader)\n\n xs = []\n ys = []\n for i in xrange(len(x)):\n ys.append([x[i][0]])\n xs.append(x[i][1:])\n Xtr = numpy.array(xs)\n Ytr = numpy.array(ys)\n\n # Load test data\n reader = csv.reader(open(filename,test,\"rb\"), delimiter=',')\n reader.next()\n x = list(reader)\n xs = []\n ys = []\n for i in xrange(len(x)):\n ys.append([x[i][0]])\n xs.append(x[i][1:])\n Xte = numpy.array(xs)\n Yte = numpy.array(ys)\n\n return Xtr,Ytr,Xte,Yte", "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\n f.close()\n \n X_train = [np.reshape(x, (784, 1)) for x in training_data[0]]\n Y_train = [vectorized_result(y) for y in training_data[1]]\n \n X_validation = [np.reshape(x, (784, 1)) for x in validation_data[0]]\n Y_validation = validation_data[1]\n \n X_test = [np.reshape(x, (784, 1)) for x in test_data[0]]\n Y_test = test_data[1]\n \n return (X_train, Y_train, X_validation, Y_validation, X_test, Y_test)", "def __init__(self,\n x_train,\n y_train,\n train_indices,\n x_test,\n y_test,\n test_indices,\n x_unlabel=None,\n y_unlabel=None,\n unlabel_indices=None,\n y_train_str=None,\n y_test_str=None):\n self._x_train = x_train\n self._train_indices = train_indices\n self._y_train = y_train\n self._x_test = x_test\n self._y_test = y_test\n self._test_indices = test_indices\n self._x_unlabel = x_unlabel\n self._y_unlabel = y_unlabel\n self._unlabel_indices = unlabel_indices\n self._y_train_str = y_train_str\n self._y_test_str = y_test_str", "def load_data():\n\n print('Loading and Visualizing Data ...')\n\n file_name = path.join(getcwd(), 'ex3', 'src', 'data', 'ex3data1')\n data = scipy.io.loadmat(file_name)\n\n # training data stored in arrays X, y\n # y should be a row vector of labels\n return data['X'], data['y'].T[0]", "def test_dataset_from_file(train_dataset):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. 
Proin sed\"\n assert train_dataset[0][0] == dummy\n assert train_dataset[0][1] == '6'", "def load_data(filename):\n emnist = loadmat(filename)\n\n # Load training images and labels\n train_images_unshuffled = emnist['train_images']\n train_labels_unshuffled = emnist['train_labels']\n\n # Combine labels and training data\n combined_training = np.hstack((train_images_unshuffled, train_labels_unshuffled))\n\n # Shuffle data\n np.random.shuffle(combined_training)\n\n # Seperate into data and labels\n # Split into training and validation sets\n train_images = combined_training[:20800,:-1] / 255 # Normalize data, values are now between 0 and 1\n train_labels = combined_training[:20800,-1][...,None] # Turns back into column vector\n validation_images = combined_training[20800:,:-1] / 255 # Normalize data, values are now between 0 and 1\n validation_labels = combined_training[20800:,-1][...,None] # Turns back into column vector\n\n # Load training images and labels\n test_images = emnist['test_images'] / 255 # Normalize data, values are now between 0 and 1\n test_labels = emnist['test_labels']\n\n return train_images, train_labels, test_images, test_labels, validation_images, validation_labels", "def load_or_generate_data(self) -> None:\n\n # Training set defined as a 5 x 5 square:\n xg1 = np.linspace(-5, 10, 5)\n xg2 = np.linspace(0, 15, 5)\n x = np.zeros((xg1.size * xg2.size, 2))\n for i, x1 in enumerate(xg1):\n for j, x2 in enumerate(xg2):\n x[i + xg1.size * j, :] = [x1, x2]\n\n y = self.branin(x)[:, None]\n self.x, self.y = x, y", "def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label", "def load_dataset(dataset_file, suffix, columns_for_Y):\n ouput_name = '{}.csv'.format(remove_extension(dataset_file))\n train_all = pd.read_csv('{}-{}-train'.format(ouput_name, suffix))\n test_all = pd.read_csv('{}-{}-test'.format(ouput_name, suffix))\n X_train = train_all[train_all.columns.difference(columns_for_Y)]\n X_test = test_all[test_all.columns.difference(columns_for_Y)]\n\n # X_train, X_test, Y_train, Y_test\n return X_train, X_test, train_all[columns_for_Y], test_all[columns_for_Y]", "def load_test_data():\r\n X_test = np.load('data/test/X_test.npy')\r\n scaling_test = np.load('data/test/scaling_test.npy')\r\n ids_test = np.load('data/test/ids_test.npy')\r\n y_test = np.load('data/test/y_test.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_test)\r\n\r\n return X_test, scaling_test, ids_test, y_test", "def loadtrainData():\n train_x = []\n train_y = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train_x.append([float(lineArr[i]) for i in range(len(lineArr) - 1)])\n train_y.append(int(lineArr[-1]))\n return np.mat(train_x), np.mat(train_y).transpose()", "def load_data(filename):\n with open(\"./shopping.csv\", \"r\") as f:\n reader = csv.reader(f)\n next(reader)\n 
evidence_raw = []\n labels_raw = []\n for row in reader:\n evidence_raw.append(row[:-1])\n labels_raw.append(row[-1])\n evidence = []\n labels = []\n for row1, row2 in zip(evidence_raw, labels_raw):\n evidence.append(oneHotEncode_Evi(row1))\n labels.append(oneHotEncode_labels(row2))\n return (evidence, labels)", "def test_load_UCR_UEA_dataset():\n X, y = load_UCR_UEA_dataset(name=\"UnitTest\")\n assert isinstance(X, pd.DataFrame) and isinstance(y, np.ndarray)\n assert X.shape == (42, 1) and y.shape == (42,)", "def get_data():\n\n pathxtrain = sys.argv[1]\n pathxtest = sys.argv[2]\n pathlabeltrain = sys.argv[3]\n pathlabeltest = sys.argv[4]\n\n xtrain = p.read_csv(pathxtrain, header=None)\n xtest = p.read_csv(pathxtest, header=None)\n label_train = p.read_csv(pathlabeltrain, header=None)\n label_test = p.read_csv(pathlabeltest, header=None)\n\n xtrain_mx = xtrain.values\n xtest_mx = xtest.values\n\n label_train = label_train.values.reshape(label_train.shape[0])\n label_test = label_test.values.reshape(label_test.shape[0])\n\n return xtrain_mx, xtest_mx, label_train, label_test", "def data(self):\n (x_train, y_train), (_, _) = datasets.fashion_mnist.load_data()\n x_train = x_train.reshape((-1, 28, 28, 1))\n x_train, y_train = x_train.astype('float16') / 255.0, \\\n tf.keras.utils.to_categorical(y_train.astype('float16'), 10)\n (x_train, x_eval) = x_train[5000:], x_train[:5000]\n (y_train, y_eval) = y_train[5000:], y_train[:5000]\n train_data, eval_data = (x_train, y_train), (x_eval, y_eval)\n return train_data, eval_data", "def get_embeddings():\n # Load the raw embedding data\n X_train = np.load('./train_embeddings.npy')\n \n y_train = np.load('./train_labels.npy')\n \n X_valid = np.load('./valid_embeddings.npy')\n \n y_valid = np.load('./valid_labels.npy')\n \n X_test = np.load('./test_embeddings.npy')\n \n y_test = np.load('./test_labels.npy')\n\n #return X_train, y_train\n return X_train, y_train, X_valid, y_valid, X_test, y_test", "def load_train_data():\n\n # Load X_train\n with open('X_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TRAIN_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_train = np.array(feature_string_matrix)\n # Load Y_train\n with open('y_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n y_string = []\n for row in reader:\n y_value = [float(row['y'])]\n y_string.append(y_value)\n y_train = np.array(y_string)\n return X_train, y_train", "def load_data():\n x = np.genfromtxt(X_FILE, usecols=(0, 1))\n y = np.genfromtxt(Y_FILE, usecols=(0))\n\n return x, y", "def dataset(self):\n if self.X is not None and self.y is not None:\n return self.X, self.y\n\n self.X, self.y = self.get_BOW_from_file(self.labels[0])\n for label in self.labels[1:]:\n X_temp, y_temp = self.get_BOW_from_file(label)\n self.X = np.concatenate((self.X, X_temp))\n self.y = np.concatenate((self.y, y_temp))\n\n return self.X, self.y", "def load_data(train_size=30000, random_state=0):\n print(\"Loading adult data from alibi.\")\n np.random.seed(random_state)\n\n data = alibi.datasets.fetch_adult()\n\n # mix input data\n data_perm = np.random.permutation(np.c_[data.data, data.target])\n data.data = data_perm[:, :-1]\n data.target = data_perm[:, -1]\n\n # perform train / test split\n X_train, y_train = data.data[:train_size, :], 
data.target[:train_size]\n X_test, y_test = data.data[train_size:, :], data.target[train_size:]\n\n return data, X_train, y_train, X_test, y_test", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features", "def load_data(test_split=0.1):\n global _data\n random.shuffle(_data)\n idx = int(len(_data) * (1 - test_split))\n x_train, y_train = np.array([d[:4] for d in _data[:idx]]), np.array([name_index[d[4]] for d in _data[:idx]])\n x_test, y_test = np.array([d[:4] for d in _data[idx:]]), np.array([name_index[d[4]] for d in _data[idx:]])\n return (x_train, y_train), (x_test, y_test)", "def load_data_and_embedding():\n\n # Load data\n df_data = pd.read_csv('../new_data/train_ids_and_labels_1400.txt',nrows=10000)\n y = df_data['class'] - 1 # class (0 ~ 18)\n X = df_data.drop(['class'], axis=1).values\n\n # Transform to binary class matrix\n y = to_categorical(y.values)\n\n # Randomly shuffle data\n np.random.seed(10)\n\n shuffle_indices = np.random.permutation(range(len(y)))\n X_shuffled = X[shuffle_indices]\n y_shuffled = y[shuffle_indices]\n\n # Split to train/test set\n # TODO: This is very crude, should use cross validation\n val_sample_index = -1 * int(0.2 * len(y))\n X_train, X_val = X_shuffled[:val_sample_index], X_shuffled[val_sample_index:]\n y_train, y_val = y_shuffled[:val_sample_index], y_shuffled[val_sample_index:]\n\n del df_data, X, y, X_shuffled, y_shuffled\n\n embedding_matrix = np.load(\"../embedding/word-embedding-200d-mc5.npy\")\n\n return X_train, y_train, X_val, y_val,embedding_matrix", "def load_data(dataset_str):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n 
objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\n \"data/corpus/{}/{}.test.index\".format(dataset_str, dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n print(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)\n\n # training nodes are training docs, no initial features\n # print(\"x: \", x)\n # test nodes are training docs, no initial features\n # print(\"tx: \", tx)\n # both labeled and unlabeled training instances are training docs and words\n # print(\"allx: \", allx)\n # training labels are training doc labels\n # print(\"y: \", y)\n # test labels are test doc labels\n # print(\"ty: \", ty)\n # ally are labels for labels for allx, some will not have labels, i.e., all 0\n # print(\"ally: \\n\")\n # for i in ally:\n # if(sum(i) == 0):\n # print(i)\n # graph edge weight is the word co-occurence or doc word frequency\n # no need to build map, directly build csr_matrix\n # print('graph : ', graph)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(\n min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n # print(len(labels))\n\n idx_test = test_idx_range.tolist()\n # print(idx_test)\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask", "def load(self):\n\n x = [] # input documents (n_docs, max_seq_len)\n labels = [] # targets we are predicting for each input\n\n for file_path in glob.glob(self.train_dir + '*.txt'):\n tokens = read_tokens(file_path)\n unique = list(set(tokens))\n x_count = round(len(unique) * 0.85)\n\n for _ in range(self.samples_per_doc):\n random.shuffle(unique)\n x.append(' '.join(unique[:x_count]))\n labels.append(' '.join(unique[x_count:]))\n\n # make x and y\n pkl = open('Model/tokenizer.p', 'rb')\n self.tokenizer = pickle.load(pkl)\n x = self.tokenizer.texts_to_matrix(x, mode='binary')\n y = self.tokenizer.texts_to_matrix(labels, mode='binary')\n\n # column zero is empty\n return x, y[:,1:]", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def _load_data(self, X, y):\n self.Xtrain = X\n\n # y if categories, needs to be transformed to number before fit\n self.le = LabelEncoder()\n self.ytrain = 
self.le.fit_transform(y)", "def eccv_train_and_test_set(split_csv_filepath):\n df = pd.read_csv(split_csv_filepath, header=None)\n filenames = [os.path.join(os.path.dirname(split_csv_filepath), 'hotornot_face', _.replace('.bmp', '.jpg')) for _ in\n df.iloc[:, 0].tolist()]\n scores = df.iloc[:, 1].tolist()\n flags = df.iloc[:, 2].tolist()\n\n train_set = dict()\n test_set = dict()\n\n for i in range(len(flags)):\n if flags[i] == 'train':\n train_set[filenames[i]] = scores[i]\n elif flags[i] == 'test':\n test_set[filenames[i]] = scores[i]\n\n return train_set, test_set", "def load_data():\r\n train = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'train.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n val = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'valid.txt', ['words', 'pos', 'ignore', 'chunk'])) # testa will be our val set\r\n test = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'test.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n\r\n return train, val, test", "def load_uci_regression_dataset(name,\n split_seed,\n train_fraction=0.9,\n data_dir=\"uci_datasets\"):\n path = os.path.join(data_dir,\n _UCI_REGRESSION_FILENAMES[UCIRegressionDatasets(name)])\n data_arr = onp.load(path)\n x, y = data_arr[\"x\"], data_arr[\"y\"]\n\n indices = jax.random.permutation(jax.random.PRNGKey(split_seed), len(x))\n indices = onp.asarray(indices)\n x, y = x[indices], y[indices]\n\n n_train = int(train_fraction * len(x))\n x_train, y_train = x[:n_train], y[:n_train]\n x_test, y_test = x[n_train:], y[n_train:]\n\n def normalize_with_stats(arr, arr_mean=None, arr_std=None):\n return (arr - arr_mean) / arr_std\n\n def normalize(arr):\n eps = 1e-6\n arr_mean = arr.mean(axis=0, keepdims=True)\n arr_std = arr.std(axis=0, keepdims=True) + eps\n return normalize_with_stats(arr, arr_mean, arr_std), arr_mean, arr_std\n\n x_train, x_mean, x_std = normalize(x_train)\n y_train, y_mean, y_std = normalize(y_train)\n x_test = normalize_with_stats(x_test, x_mean, x_std)\n y_test = normalize_with_stats(y_test, y_mean, y_std)\n\n data_info = {\"y_scale\": float(y_std)}\n\n return (x_train, y_train), (x_test, y_test), data_info", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end-time_start))\n return train_data, validate_data", "def get_data():\n iris = datasets.load_iris()\n xall = np.asarray(iris[\"data\"], dtype=np.float64)\n yall = np.asarray(iris[\"target\"], dtype=np.float64)\n xall = np.vstack([xall, (7, 2.0, 4.5, 1)])\n yall = np.append(yall, n_classes)\n X, Xval, y, yval = train_test_split(\n xall, yall, test_size=0.2, shuffle=True, random_state=12345\n )\n y = tf.one_hot(y, n_classes)\n yval = tf.one_hot(yval, n_classes)\n return X, y, Xval, yval", "def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train", 
"def load_data():\n # Load image data from MNIST.\n (train_x, train_y),(eval_x, eval_y) = keras.datasets.mnist.load_data()\n\n # We convert the input data to (60000, 28, 28, 1), float32 and normalize our data values to the range [0, 1].\n train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], train_x.shape[2], 1)\n eval_x = eval_x.reshape(eval_x.shape[0], eval_x.shape[1], eval_x.shape[2], 1)\n\n train_x = train_x.astype('float32')\n eval_x = eval_x.astype('float32')\n train_x /= 255\n eval_x /= 255\n\n # Preprocess class labels \n train_y = train_y.astype(np.int32)\n eval_y = eval_y.astype(np.int32)\n\n train_y = np_utils.to_categorical(train_y, 10)\n eval_y = np_utils.to_categorical(eval_y, 10)\n\n return train_x, train_y, eval_x, eval_y", "def load_data(self):\n\n\t\tboard_data = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'board_data.dat'))\n\t\tcows_data = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'cows_data.dat'))\n\t\tlabels = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'labels.dat'))\n\t\tlabels = labels.reshape((len(labels), 1))\n\n\t\tpermutation = np.random.permutation(len(labels))\n\n\t\treturn (board_data[permutation],\n\t\t cows_data[permutation],\n\t\t labels[permutation])", "def import_training_data(target_col = 'label'):\n dir = os.path.dirname(os.path.dirname(__file__)) # go up one level to get root of this experiment\n path = os.path.join(dir, 'data','train.csv')\n utils_logr.info('Loading data from {} as pandas df'.format(path))\n df = pd.read_csv(path)\n y = df[target_col]\n df = df.drop(target_col, axis=1)\n return df, y", "def read_data():\n csv_data = pd.read_csv('./dataset.csv')\n x = csv_data[['X1', 'X2']]\n x = x.values # numpy array for x: (180, 2)\n y = csv_data['Label']\n y = y.values # numpy array for y: (180, )\n\n\t# shuffle the data\n total = x.shape[0]\n mask = list(range(total))\n np.random.shuffle(mask)\n x = x[mask]\n y = y[mask]\n\t\n\t# 80 percent for train and 20 percent for test\n train_split = int(0.8 * total)\n x_train, y_train = x[:train_split], y[:train_split]\n x_test, y_test = x[train_split:], y[train_split:]\n return x_train, y_train, x_test, y_test", "def learnDataset(self, data_loader):\n\n print(\"learning dataset\")\n # we have 127940 sentences in total\n count = 0\n for sample in data_loader:\n input_sentence = sample[\"input\"][0]\n target_sentence = sample[\"target\"][0]\n\n # NOTE: target_word & input_word are actually indecies of words, instead of word strings\n # NOTE: the first word has index 1\n first_target = int(target_sentence[1])\n first_input = int(input_sentence[1])\n\n self.emiss_factors[0][(first_input, first_target)] += 1\n\n prev_target = first_target\n for word_idx in range(2, 16):\n # note that word_idx is 0 is always <BOS>\n target_word = int(target_sentence[word_idx])\n input_word = int(input_sentence[word_idx])\n\n self.emiss_factors[word_idx - 1][(input_word, target_word)] += 1\n self.trans_factors[word_idx - 2][(prev_target, target_word)] += 1\n prev_target = target_word\n\n print(\"{}/127940\".format(count), end = \"\\r\")\n count += 1\n print(\"127940/127940\")\n\n # all data updated, no need to do any insertion\n for i in range(15):\n self.emiss_factors[i].fixed()\n for i in range(14):\n self.trans_factors[i].fixed()", "def load_data(train=True):\n#test set\n start = time.clock()\n\n if train==False:\n dp = '/home/thomas/Desktop/OttoChallenge/test.csv'\n df = pd.read_csv(dp)\n X = df.values.astype(np.float32)[:,1:]\n 
return X\n#train set\n else:\n dp = '/home/thomas/Desktop/OttoChallenge/train.csv'\n df = pd.read_csv(dp)\n X = df[df.columns[:-1]].values.astype(np.float32)[:,1:]\n y = df.target\n y =y.apply(lambda X: int(X[-1])).values\n y = y.astype(np.int32)\n X, y = shuffle(X, y)\n #print(X.shape,y.shape)\n end = time.clock()\n print(end-start)\n\n return X,y", "def get_train_data(self, train_data):\n X = []\n Y = []\n\n # word 2 indices and tag 2 indices\n w2i = {} # word to index\n c2i = {} # char to index\n tag2idx = {} # tag2idx\n\n w2i[\"_UNK\"] = 0 # unk word / OOV\n c2i[\"_UNK\"] = 0 # unk char\n c2i[\"<w>\"] = 1 # word start\n c2i[\"</w>\"] = 2 # word end index\n \n \n num_sentences=0\n num_tokens=0\n for instance_idx, (words, tags) in enumerate(read_conll_file(train_data)):\n instance_word_indices = [] #sequence of word indices\n instance_char_indices = [] #sequence of char indices\n instance_tags_indices = [] #sequence of tag indices\n\n for i, (word, tag) in enumerate(zip(words, tags)):\n\n # map words and tags to indices\n if word not in w2i:\n w2i[word] = len(w2i)\n instance_word_indices.append(w2i[word])\n\n if self.c_in_dim > 0:\n chars_of_word = [c2i[\"<w>\"]]\n for char in word:\n if char not in c2i:\n c2i[char] = len(c2i)\n chars_of_word.append(c2i[char])\n chars_of_word.append(c2i[\"</w>\"])\n instance_char_indices.append(chars_of_word)\n\n if tag not in tag2idx:\n tag2idx[tag]=len(tag2idx)\n\n instance_tags_indices.append(tag2idx.get(tag))\n\n num_tokens+=1\n\n num_sentences+=1\n\n X.append((instance_word_indices, instance_char_indices)) # list of word indices, for every word list of char indices\n Y.append(instance_tags_indices)\n\n\n print(\"%s sentences %s tokens\" % (num_sentences, num_tokens), file=sys.stderr)\n print(\"%s w features, %s c features \" % (len(w2i),len(c2i)), file=sys.stderr)\n if self.c_in_dim == 0:\n print(\"char features disabled\", file=sys.stderr)\n\n assert(len(X)==len(Y))\n\n # store mappings of words and tags to indices\n self.set_indices(w2i, c2i, tag2idx)\n\n return X, Y", "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)", "def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)", "def train(self, X, y):", "def get_data_set(train=True):\n\n # 1\n train_or_test = \"train\" if train == True else \"test\"\n data_path = os.path.join(data_dir, \"aclImdb\",train_or_test)\n\n # 2\n pos_glob_pattern = os.path.join(data_path, \"pos\", \"*.txt\")\n neg_glob_pattern = os.path.join(data_path, \"neg\", \"*.txt\")\n pos_file_path_seq = glob.glob(pos_glob_pattern)\n neg_file_path_seq = glob.glob(neg_glob_pattern)\n\n # 3\n pos_dataset 
= [text_to_one_line(path) for path in pos_file_path_seq]\n neg_dataset = [text_to_one_line(path) for path in neg_file_path_seq]\n x = pos_dataset + neg_dataset\n y = [1.0] * len(pos_dataset) + [0.0] * len(neg_dataset)\n\n return x, y", "def get_training_data():\n features = []\n labels = []\n\n with open('data.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n\n rows = [line for line in csv_reader]\n random.shuffle(rows)\n\n for vector in rows:\n feature_vector = [float(vector[i]) for i in range(4)]\n features.append(feature_vector)\n labels.append(encode_label(vector[4]))\n\n normalise_features(features)\n\n return features, labels", "def load_data(train_path, test_path):\n\n train_data = pd.read_csv(train_path)\n test_data = pd.read_csv(test_path)\n\n print(\"number of training examples = \" + str(train_data.shape[0]))\n print(\"number of test examples = \" + str(test_data.shape[0]))\n print(\"train shape: \" + str(train_data.shape))\n print(\"test shape: \" + str(test_data.shape))\n\n return train_data, test_data", "def load_data(train_path, test_path):\n\n train_data = pd.read_csv(train_path)\n test_data = pd.read_csv(test_path)\n\n print(\"number of training examples = \" + str(train_data.shape[0]))\n print(\"number of test examples = \" + str(test_data.shape[0]))\n print(\"train shape: \" + str(train_data.shape))\n print(\"test shape: \" + str(test_data.shape))\n\n return train_data, test_data", "def load_data(path='alex_mnist_data.npz'):\n with np.load(path, allow_pickle=True) as f:\n x_train, y_train = f['alex_train_data'], f['alex_train_label']\n x_test, y_test = f['alex_test_data'], f['alex_test_label']\n return (x_train, y_train),(x_test, y_test)", "def load_data(opt=\"mnist\"):\n if opt == \"mnist\":\n train, test = tf.keras.datasets.mnist.load_data()\n \n x_train, y_train = train\n x_test, y_test = test\n \n x_train = x_train.reshape(x_train.shape[0], 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n \n y_train = y_train.astype(np.int)\n y_test = y_test.astype(np.int)\n for i in range(len(y_train)):\n y_train[i] = 1 if y_train[i] % 2 == 0 else -1\n for i in range(len(y_test)):\n y_test[i] = 1 if y_test[i] % 2 == 0 else -1\n\n elif opt == \"covertype\":\n df = pd.read_csv(\"covtype.data\", header=None)\n x = df.iloc[:, 0:54].values\n y = df[54].values\n for i in range(len(y)):\n y[i] = 1 if y[i] % 2 == 0 else -1\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n \n else:\n logging.error(\"Unknown dataset!!\")\n\n logging.info(\"train data shape: {}\".format(x_train.shape))\n logging.info(\"test data shape: {}\".format(x_test.shape))\n return (x_train, y_train), (x_test, y_test)", "def load_dataset(self, x_t, y_t, x_v, y_v, all_data, all_labels):\n\n self.x_t, self.y_t, self.x_v, self.y_v = x_t, y_t, x_v, y_v\n\n self.all_data = all_data\n self.all_labels = all_labels\n\n if self.verbose:\n print(f'The training data has dimension of {self.x_t.shape}.')\n print(f'The training labels has dimension of {self.y_t.shape}.')\n print(f'The testing data has dimension of {self.x_v.shape}.')\n print(f'The testing labels has dimension of {self.y_v.shape}.')", "def load_data(directory: str, dataset_str: str):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"{}/ind.{}.{}\".format(directory, dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, 
ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\"{}/ind.{}.test.index\".format(directory, dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return graph, features, y_train, y_val, y_test, train_mask, val_mask, test_mask", "def load_data():\n\n \"\"\"The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\"\"\"\n\n \"\"\"The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. 
Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\"\"\"\n\n \"\"\"The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\"\"\"\n f = gzip.open('MNIST/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = Pickle.load(f, encoding='bytes'\n )\n f.close()\n return (training_data, validation_data, test_data)", "def load_data(data_dir):\n training_data = pd.read_csv(os.path.join(data_dir, 'driving_log.csv'))\n X = training_data[['center', 'left', 'right']].values\n y = training_data['steering'].values\n \n return X, y", "def load_test_train_data(train_file,\n test_file,\n load_augmented_train_data=False,\n num_words=None,\n max_comment_length=500):\n train_data = load_train_data(train_file, load_augmented_train_data)\n test_data = pd.read_csv(\n test_file, encoding='utf-8', dtype={'comment_text': str})\n tokenizer = Tokenizer(num_words, oov_token='unk')\n tokenizer.fit_on_texts(\n pd.concat(\n [train_data['comment_text'], test_data['comment_text']],\n ignore_index=True))\n # Pull train, test data and their labels\n x_train = []\n y_train = []\n for seq, row in zip(\n tokenizer.texts_to_sequences_generator(train_data['comment_text']),\n train_data[[\n 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult',\n 'identity_hate'\n ]].iterrows()):\n x_train.append(seq)\n y_train.append(row[1].values.tolist())\n # Truncate and pad input sequences\n x_train = sequence.pad_sequences(x_train, maxlen=max_comment_length)\n x_test = sequence.pad_sequences(\n tokenizer.texts_to_sequences(test_data['comment_text']),\n maxlen=max_comment_length)\n return (np.asarray(x_train),\n np.asarray(y_train)), np.asarray(x_test), tokenizer.word_index", "def load_data(train_file, test_file):\n\n data = np.asarray(pd.read_csv(train_file, header=0))\n data_ts = np.asarray(pd.read_csv(test_file, header=0))\n\n x_tra = data[:, :-1]\n y_tra = data[:, -1]\n\n return x_tra, y_tra, data_ts", "def trainData(self, X, y, NeuralNet, epochs):", "def load_examples():\n X = []\n Y = []\n with open('examples.txt') as fin:\n for i, line in enumerate(fin):\n if line[0].isdigit():\n bias, pos, neg, label = map(float, line.strip().split(','))\n X.append([bias, pos, neg])\n Y.append(label)\n X = np.array(X)\n Y = np.array(Y).reshape(i, 1)\n return X, Y", "def get_data(self):\n return self.X_train, self.X_test, self.y_train, self.y_test", "def load_eval_dataset(self):\n dict_path = get_eval_data(self.eval_path, self.src_lang, self.tgt_lang)\n\n pairs = []\n not_found_all = 0\n not_found_L1 = 0\n not_found_L2 = 0\n\n # Open the file and check if src and tgt word exists in the vocab\n with open(dict_path, \"r\") as f:\n for _, line in enumerate(f):\n word1, word2 = line.rstrip().split()\n if word1 in self.src_dico and word2 in self.tgt_dico:\n pairs.append((self.src_dico.index(word1), self.tgt_dico.index(word2)))\n else:\n not_found_all += 1\n not_found_L1 += int(word1 not in self.src_dico)\n not_found_L2 += int(word2 not in self.tgt_dico)\n print(\n \"Found %i pairs of words in the dictionary (%i unique). 
\"\n \" %i other pairs contained at least one unknown word \"\n \" (%i in src_lang, %i in tgt_lang)\"\n % (\n len(pairs),\n len(set([x for x, _ in pairs])),\n not_found_all,\n not_found_L1,\n not_found_L2,\n )\n )\n src_ind = [pairs[x][0] for x in range(len(pairs))]\n tgt_ind = [pairs[x][1] for x in range(len(pairs))]\n self.src_ind = np.asarray(src_ind)\n self.tgt_ind = np.asarray(tgt_ind)", "def generate_inputs_and_targets(self, batch_size, train = True):\n if train:\n dataset = self.train_dataset\n else:\n dataset = self.test_dataset\n loader = torch.utils.data.DataLoader(dataset = dataset, batch_size=batch_size, shuffle=True)\n batch_idx, (data, target) = next(enumerate(loader))\n\n \"\"\" Convert the dataset \"data\" and \"target\" variables to s and d \"\"\"\n data, target = data.to(self.device), target.to(self.device)\n x_data = data.reshape([batch_size, 28*28]) # Flatten\n y_target = torch.zeros([batch_size, 10])\n for n in range(batch_size):\n y_target[n, target[n]] = 1 # Convert to one-hot\n \n return x_data, y_target", "def load_data(max_len: int, vocab_size: int) -> Tuple[NumpyDataset, NumpyDataset]:\n (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.imdb.load_data(maxlen=max_len, num_words=vocab_size)\n # pad the sequences to max length\n x_train = np.array([pad(x, max_len, 0) for x in x_train])\n x_eval = np.array([pad(x, max_len, 0) for x in x_eval])\n\n train_data = NumpyDataset({\"x\": x_train, \"y\": y_train})\n eval_data = NumpyDataset({\"x\": x_eval, \"y\": y_eval})\n return train_data, eval_data", "def load(self):\n\n X_train, y_train, X_test, y_test, variable_types, name = _load_data(\n self.task_id)\n\n self.X_train = X_train\n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.variable_types = variable_types\n self.name = name\n\n return self.X_train, self.y_train, self.X_test, self.y_test", "def oil(data_set='three_phase_oil_flow'):\r\n if not data_available(data_set):\r\n download_data(data_set)\r\n oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')\r\n oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')\r\n oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')\r\n oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')\r\n oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')\r\n oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')\r\n fid = open(oil_train_file)\r\n X = np.fromfile(fid, sep='\\t').reshape((-1, 12))\r\n fid.close()\r\n fid = open(oil_test_file)\r\n Xtest = np.fromfile(fid, sep='\\t').reshape((-1, 12))\r\n fid.close()\r\n fid = open(oil_valid_file)\r\n Xvalid = np.fromfile(fid, sep='\\t').reshape((-1, 12))\r\n fid.close()\r\n fid = open(oil_trainlbls_file)\r\n Y = np.fromfile(fid, sep='\\t').reshape((-1, 3)) * 2. - 1.\r\n fid.close()\r\n fid = open(oil_testlbls_file)\r\n Ytest = np.fromfile(fid, sep='\\t').reshape((-1, 3)) * 2. - 1.\r\n fid.close()\r\n fid = open(oil_validlbls_file)\r\n Yvalid = np.fromfile(fid, sep='\\t').reshape((-1, 3)) * 2. 
- 1.\r\n fid.close()\r\n return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xtest' : Xtest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)\r\n #else:\r\n # throw an error\r", "def load_data():\r\n from sklearn.feature_extraction.text import CountVectorizer\r\n\r\n # Load the data\r\n\r\n with open(\"clean_real.txt\", 'r') as RealNews:\r\n RealStrAr = RealNews.read().split('\\n')\r\n\r\n with open(\"clean_fake.txt\", 'r') as FakeNews:\r\n FakeStrAr = FakeNews.read().split('\\n')\r\n\r\n # Preprocess it using a vectorizer\r\n\r\n MyCoolVectorizer = CountVectorizer()\r\n X = MyCoolVectorizer.fit_transform(RealStrAr + FakeStrAr)\r\n\r\n RealLabels = np.ones((len(RealStrAr), 1)) # means real\r\n FakeLabels = np.zeros((len(FakeStrAr), 1)) # means fake\r\n AllLabels = np.append(RealLabels, FakeLabels, axis=0)\r\n\r\n FinalTensor = np.append(X.toarray(), AllLabels, axis=1)\r\n\r\n # Randomize it and split it\r\n\r\n np.random.shuffle(FinalTensor)\r\n\r\n # divide and multiply by 2 just to make sure it's even\r\n ROUGHLY70 = 2 * ((FinalTensor.shape[0] * 70 / 100) / 2)\r\n ROUGHLY15 = (FinalTensor.shape[0] - ROUGHLY70) / 2\r\n\r\n # TEST SET VALIDATION SET TRAINING SET DICTIONARY\r\n return (FinalTensor[:ROUGHLY15], FinalTensor[ROUGHLY15 : 2 * ROUGHLY15], FinalTensor[-ROUGHLY70:], MyCoolVectorizer.get_feature_names())", "def load_data():\n print (\"Loading the arxiv.\")\n arxiv = datasets.load_arxiv(depth=2)\n\n print (\"Loading the RNN training data.\")\n print (\"Select depth (2/3/5):\")\n depth = int(input())\n rnn_training_data = datasets.training_data(\"rnn\", depth=depth)\n\n n_inputs = rnn_training_data.training.X.shape[2] # dimension of w2v model\n n_outputs = rnn_training_data.training.dimY\n\n return arxiv, rnn_training_data, n_inputs, n_outputs", "def _load_training_data(self):\n self._save_training_data()", "def get_training_data(self):\n\n # this actually never was a set\n # src_set = self.target['src'].values\n # dst_set = self.target['dst'].values\n\n # train_negative = self.get_negative_edges(src_set, dst_set, self.train_ind.shape[0]) # * self.K)\n # test_negative = self.get_negative_edges(src_set, dst_set, self.test_ind.shape[0])\n\n train_positive = self.target.iloc[self.train_edge_ind].values\n test_positive = self.target.iloc[self.test_edge_ind].values\n\n # # print(train_positive.shape, train_negative.shape, test_positive.shape, test_negative.shape)\n # print(f\"Working with {train_positive.shape[0]} positive and {train_negative.shape[0]} negative samples in the train set, {test_positive.shape[0]} and {test_negative.shape[0]} - in test set\")\n\n X_train = train_positive\n X_test = test_positive\n\n y_train = np.ones((self.train_edge_ind.shape[0],))\n y_test = np.ones((self.test_edge_ind.shape[0],))\n\n # X_train = np.vstack([\n # train_positive,\n # train_negative\n # ])\n\n # X_test = np.vstack([\n # test_positive,\n # test_negative\n # ])\n\n # y_train = np.concatenate([np.ones((self.train_ind.shape[0],)), np.zeros((self.train_ind.shape[0]),)]) # self.train_ind.shape[0]) * self.K\n # y_test = np.concatenate([np.ones((self.test_ind.shape[0],)), np.zeros((self.test_ind.shape[0],))])\n\n assert X_train.shape[0] == y_train.shape[0]\n assert X_test.shape[0] == y_test.shape[0]\n\n def shuffle(X, y):\n ind_shuffle = np.arange(0, X.shape[0])\n np.random.shuffle(ind_shuffle)\n return X[ind_shuffle], y[ind_shuffle]\n\n self.X_train, self.y_train = shuffle(X_train, y_train)\n self.X_test, self.y_test = shuffle(X_test, y_test)\n\n print(f\"Splitting into 
{self.X_train.shape[0]} train and {self.X_test.shape[0]} test samples\")\n\n # return X_train, X_test, y_train, y_test", "def load_data(filename, load2=True, load3=True):\n assert (load2 or load3), \"Atleast one dataset must be loaded.\"\n data = np.load(filename)\n if load2 and load3:\n inputs_train = np.hstack((data['train2'], data['train3']))\n inputs_valid = np.hstack((data['valid2'], data['valid3']))\n inputs_test = np.hstack((data['test2'], data['test3']))\n target_train = np.hstack((np.zeros((1, data['train2'].shape[1])), np.ones((1, data['train3'].shape[1]))))\n target_valid = np.hstack((np.zeros((1, data['valid2'].shape[1])), np.ones((1, data['valid3'].shape[1]))))\n target_test = np.hstack((np.zeros((1, data['test2'].shape[1])), np.ones((1, data['test3'].shape[1]))))\n else:\n if load2:\n inputs_train = data['train2']\n target_train = np.zeros((1, data['train2'].shape[1]))\n inputs_valid = data['valid2']\n target_valid = np.zeros((1, data['valid2'].shape[1]))\n inputs_test = data['test2']\n target_test = np.zeros((1, data['test2'].shape[1]))\n else:\n inputs_train = data['train3']\n target_train = np.zeros((1, data['train3'].shape[1]))\n inputs_valid = data['valid3']\n target_valid = np.zeros((1, data['valid3'].shape[1]))\n inputs_test = data['test3']\n target_test = np.zeros((1, data['test3'].shape[1]))\n\n return inputs_train.T, inputs_valid.T, inputs_test.T, target_train.T, target_valid.T, target_test.T", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def build_enru_custom_test(self):\n train_data_file = self.data_dir + '/' + enru_paracrawl\n eval_data_file = self.data_dir + '/' + enru_newscomm\n train_data = tf.data.experimental.CsvDataset(\n [train_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n train_data = train_data.cache() # only read once\n eval_data = tf.data.experimental.CsvDataset(\n [eval_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n\n eval_data = eval_data.skip(9000).take(10000)\n eval_data = eval_data.cache()\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n\n return train_data, eval_data", "def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n y_val = 
y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask", "def train(self, X, y):\n pass", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def trainAndCalculate(self):\n f = open(\"congressional_voting_dataset.csv\")\n data = np.genfromtxt(fname = f, delimiter=',', dtype=str, encoding=None)\n X = data[:, :-1]\n y = data[:, -1]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)\n self.svclassifier.fit(X_train, y_train)" ]
[ "0.7972531", "0.6284241", "0.62223625", "0.61976546", "0.6173541", "0.6136231", "0.6088554", "0.60739297", "0.60504085", "0.60448134", "0.60420364", "0.60369146", "0.60268086", "0.60214686", "0.60152215", "0.6013476", "0.6010893", "0.5968949", "0.5953889", "0.5910949", "0.589535", "0.58857495", "0.58570874", "0.58107865", "0.5791524", "0.5782802", "0.57761437", "0.57728124", "0.57667637", "0.5729273", "0.5725304", "0.5725012", "0.57191813", "0.5713741", "0.57055366", "0.5691502", "0.568829", "0.5675569", "0.567355", "0.567281", "0.56567997", "0.56511945", "0.5643744", "0.56421244", "0.5636686", "0.5632004", "0.5628001", "0.56206244", "0.56198853", "0.56055", "0.5605213", "0.5604374", "0.5603613", "0.56007075", "0.55968064", "0.5589664", "0.55743474", "0.55711144", "0.55709803", "0.5565128", "0.5563055", "0.55539006", "0.55402887", "0.5535538", "0.5527032", "0.552657", "0.5518466", "0.55171776", "0.55116665", "0.5493009", "0.54922813", "0.5485722", "0.5485722", "0.5480997", "0.5476532", "0.54746294", "0.5459228", "0.54525405", "0.5450419", "0.54488045", "0.54385614", "0.5432898", "0.54309374", "0.54240805", "0.5423515", "0.5422813", "0.541729", "0.5412019", "0.54108065", "0.5409112", "0.5403748", "0.5403662", "0.5399056", "0.5394533", "0.5393193", "0.5392291", "0.53882074", "0.53854316", "0.538469", "0.5375252" ]
0.8670741
0
Load and return the vowel testing dataset.

Returns
    (X, y) Tuple
        A tuple of data and target
def _load_vowel_test():
    vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1)
    X = vowel_data[:, -10:]
    y = vowel_data[:, 1].astype(int)
    return (X, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_vowel():\n train = _load_vowel_train()\n test = _load_vowel_test()\n return (train[0], train[1].reshape(-1, 1), test[0], test[1].reshape(-1, 1))", "def learn_vowels(self, data=None):\n #pdb.set_trace()\n if not data:\n data = self.memory\n # find acoustic prototypes by clustering over stored acoustic reps\n raw_data = data.reshape(4 * len(self.stems), 2)\n ac_vowels, ac_spread = vq.kmeans(raw_data, 4)\n # find articulatory reps by comparing synthesized output vowels to\n # acoustic prototypes\n # start with candidate list of \"all possible\" articulations\n tmp_ar = N.empty((1, 3))\n rd = 0.0\n for hi in [0.0, 1.0]:\n for bk in [0.0, 1.0]:\n tmp_ar = N.vstack((tmp_ar, N.array([hi, bk, rd])))\n tmp_ar = tmp_ar[1:]\n while len(self.vowel_map) < 4:\n # no noise (since this shouldn't be running through the \"mouth\")\n tmp_ac = self.perceive(self.acoustify(tmp_ar))\n for v in ac_vowels:\n dists = N.sqrt(N.sum((v - tmp_ac)**2, axis=1))\n d = 0\n while True:\n if dists[d] < (2 * ac_spread):\n # found an articulatory prototype\n self.vowel_map[tuple(v)] = tmp_ar[d]\n # remove it from the candidate list\n tmp_ar = N.vstack((tmp_ar[:d], tmp_ar[d + 1:]))\n tmp_ac = N.vstack((tmp_ac[:d], tmp_ac[d + 1:]))\n break\n d += 1\n if d == len(dists):\n # take the best of the bad ones\n index = N.argmin(dists)\n self.vowel_map[tuple(v)] = tmp_ar[index]\n break\n self.vowel_spread = ac_spread\n return self.vowel_map", "def test_load_UCR_UEA_dataset():\n X, y = load_UCR_UEA_dataset(name=\"UnitTest\")\n assert isinstance(X, pd.DataFrame) and isinstance(y, np.ndarray)\n assert X.shape == (42, 1) and y.shape == (42,)", "def generate_vowel():\n return random.sample(['a', 'e', 'i', 'o', 'u', 'y'], 1)", "def vowels(self):\n vas = []\n file = self.read()\n words = re.sub(\"[aeiouAEIOU]\",\" \", file).split(\" \")\n for h_u in words:\n if h_u != \"\":\n vas.append(h_u)\n self.print(vas)\n self.write(vas)\n logging.debug(\"Starting with to\")\n return vas", "def analyse_vowels(self, source):\r\n\r\n word_set = set()\r\n with open(source) as f:\r\n for line in f:\r\n words = [word.lower().strip() for word in line.split()]\r\n for word in words:\r\n map(self.parse_character, word)\r\n stripped = ''.join(c for c in word if c in letters)\r\n if stripped:\r\n word_set.add(stripped)\r\n vowels = self.get_possible_vowels(word_set)\r\n return self.filter_vowels(vowels, word_set)", "def test_one_disemvowel_code_wars():\n from disemvowel_trolls import disemvowel\n tests = [(\"This website is for losers LOL!\", \"Ths wbst s fr lsrs LL!\"),\n (\"No offense but,\\nYour writing is among the worst I've everread\",\n \"N ffns bt,\\nYr wrtng s mng th wrst 'v vrrd\"),\n (\"What are you, a communist?\", \"Wht r y, cmmnst?\")]\n\n for case in tests:\n assert disemvowel(case[0]) == case[1]", "def get_x_y() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n logger.log('Loading Dataset...')\n x_train, y_train = helpers.datasets.load_voice()\n logger.log(str(len(y_train)) + ' train data loaded')\n\n x_test, y_test = None, None\n # x_test, y_test = helpers.datasets.load_voice(train=False)\n # logger.log(str(len(y_test)) + ' test data loaded')\n\n return x_train, y_train, x_test, y_test", "def load_data(self) -> tuple:\n self.read_path = Path(os.environ[\"DATA_PATH\"]) / \"characters\"\n self.pretrain_path = Path(os.environ[\"FONT_DATA\"]) / \"training\"\n self.dataset_builder.build_data_set()\n X_pretrain, y_pretrain, X_train, y_train, X_dev, y_dev, X_test, y_test = tuple(\n [] for l in range(8)\n )\n\n for letter in 
self.hebrew.letter_li:\n pretrain_images = glob(f\"{Path(self.pretrain_path/letter)}/*.jpeg\")\n train_images = glob(f'{Path(self.read_path/\"train\"/letter)}/*.jpg')\n dev_images = glob(f'{Path(self.read_path/\"dev\"/letter)}/*.jpg')\n test_images = glob(f'{Path(self.read_path/\"test\"/letter)}/*.jpg')\n\n # pretrain data\n for img in pretrain_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_pretrain.append(image)\n y_pretrain.append(self.hebrew.letter_li.index(letter))\n\n # training data\n for img in train_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_train.append(image)\n y_train.append(self.hebrew.letter_li.index(letter))\n\n # dev data\n for img in dev_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_dev.append(image)\n y_dev.append(self.hebrew.letter_li.index(letter))\n\n # test data\n for img in test_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_test.append(image)\n y_test.append(self.hebrew.letter_li.index(letter))\n\n return (\n np.array(X_pretrain),\n np.array(y_pretrain),\n np.array(X_train),\n np.array(y_train),\n np.array(X_dev),\n np.array(y_dev),\n np.array(X_test),\n np.array(y_test),\n )", "def load_test_data():\r\n X_test = np.load('data/test/X_test.npy')\r\n scaling_test = np.load('data/test/scaling_test.npy')\r\n ids_test = np.load('data/test/ids_test.npy')\r\n y_test = np.load('data/test/y_test.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_test)\r\n\r\n return X_test, scaling_test, ids_test, y_test", "def load_data():\n x = np.genfromtxt(X_FILE, usecols=(0, 1))\n y = np.genfromtxt(Y_FILE, usecols=(0))\n\n return x, y", "def test_train_test_split_uni_exo(load_uni_exo_data_target):\n data, target = load_uni_exo_data_target\n\n ####################################\n #### Continuous fh without Gaps ####\n ####################################\n\n #### Integer fh ----\n exp = TSForecastingExperiment()\n fh = 12\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.test.index == data.iloc[-fh:].index)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.X_test.index == data.iloc[-fh:].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.y_test.index == data.iloc[-fh:].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(exp.train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.test_transformed.index == data.iloc[-fh:].index)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(exp.X_train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.X_test_transformed.index == data.iloc[-fh:].index)\n assert np.all(exp.y_train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.y_test_transformed.index == data.iloc[-fh:].index)\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.arange(1, 10) # 9 
values\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.X_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.y_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.X_test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.y_test_transformed.index == data.iloc[-len(fh) :].index)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [1, 2, 3, 4, 5, 6]\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.X_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.y_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.X_test_transformed.index == data.iloc[-len(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.y_test_transformed.index == data.iloc[-len(fh) :].index)\n\n #################################\n #### Continuous fh with Gaps ####\n #################################\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.arange(7, 13) # 6 values\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) 
:].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [4, 5, 6]\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n\n ####################################\n #### Discontinuous fh with Gaps ####\n ####################################\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.array([4, 5, 6, 10, 11, 12]) # 6 values\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test_transformed.index == data.iloc[-max(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [4, 5, 6, 10, 11, 12]\n exp.setup(data=data, target=target, fh=fh, seasonal_period=4, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n # `test`` call still refers to y_test indices and not X_test 
indices\n assert len(exp.test) == len(fh)\n assert np.all(exp.X.index == data.index)\n assert np.all(exp.y.index == data.index)\n assert np.all(exp.X_train.index == data.iloc[: (len(data) - max(fh))].index)\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test.index == data.iloc[-max(fh) :].index)\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert np.all(exp.X_transformed.index == data.index)\n assert np.all(exp.y_transformed.index == data.index)\n assert np.all(\n exp.X_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n # Exogenous variables will not have any gaps (only target has gaps)\n assert np.all(exp.X_test_transformed.index == data.iloc[-max(fh) :].index)\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)", "def load_data():\n d = sio.loadmat('ex5data1.mat')\n return map(np.ravel, [d['X'], d['y'], d['Xval'], d['yval'], d['Xtest'], d['ytest']])", "def get_vowel_names():", "def convert_data_to_examples(train, test, data_column, label_column):\r\n train_InputExamples = train.apply(lambda x: InputExample(guid=None,\r\n text_a = x[data_column],\r\n text_b = None,\r\n label = x[label_column]), axis = 1)\r\n\r\n\r\n validation_InputExamples = test.apply(lambda x: InputExample(guid=None,\r\n text_a = x[data_column],\r\n text_b = None,\r\n label = x[label_column]), axis = 1)\r\n\r\n return train_InputExamples, validation_InputExamples", "def test_dataset_from_file(train_dataset):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. 
Proin sed\"\n assert train_dataset[0][0] == dummy\n assert train_dataset[0][1] == '6'", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def load_data_test(self, size, a_low, a_high=None):\n\n if a_high is None:\n a_high = self.a;\n\n data, label = self._generate_test_set(size, a_low, a_high, flip_structure=False);\n\n return data, label;", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def test_convert_single_vowel():\n for vowel in \"aeiou\":\n result = convert(vowel)\n assert result == vowel + \"way\"", "def load_data():\n\n print('Loading and Visualizing Data ...')\n\n file_name = path.join(getcwd(), 'ex3', 'src', 'data', 'ex3data1')\n data = scipy.io.loadmat(file_name)\n\n # training data stored in arrays X, y\n # y should be a row vector of labels\n return data['X'], data['y'].T[0]", "def test__validate_with_synthetic_data(elbow_with_synthetic_data):\n x, y, break_pt = elbow_with_synthetic_data\n expected_elbow = np.argmin(np.abs(x - break_pt))\n assert expected_elbow == find_elbow_point(x, y)", "def oil(data_set='three_phase_oil_flow'):\r\n if not data_available(data_set):\r\n download_data(data_set)\r\n oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')\r\n oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')\r\n oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')\r\n oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')\r\n oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')\r\n oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')\r\n fid = open(oil_train_file)\r\n X = np.fromfile(fid, sep='\\t').reshape((-1, 12))\r\n fid.close()\r\n fid = open(oil_test_file)\r\n Xtest = np.fromfile(fid, sep='\\t').reshape((-1, 12))\r\n fid.close()\r\n fid = open(oil_valid_file)\r\n Xvalid = np.fromfile(fid, sep='\\t').reshape((-1, 12))\r\n fid.close()\r\n fid = open(oil_trainlbls_file)\r\n Y = np.fromfile(fid, 
sep='\\t').reshape((-1, 3)) * 2. - 1.\r\n fid.close()\r\n fid = open(oil_testlbls_file)\r\n Ytest = np.fromfile(fid, sep='\\t').reshape((-1, 3)) * 2. - 1.\r\n fid.close()\r\n fid = open(oil_validlbls_file)\r\n Yvalid = np.fromfile(fid, sep='\\t').reshape((-1, 3)) * 2. - 1.\r\n fid.close()\r\n return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xtest' : Xtest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)\r\n #else:\r\n # throw an error\r", "def test_dataset_autogen(autogen_dataset):\n train_dummy = \"eget, venenatis a, magna. Lorem ipsum dolor sit amet, consectetuer\"\n val_dummy = \"leo. Vivamus nibh dolor, nonummy ac, feugiat non, lobortis quis,\"\n test_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n\n assert autogen_dataset.train[0][0] == train_dummy\n assert autogen_dataset.train[0][1] == '8'\n assert len(autogen_dataset.train) == 64\n\n assert autogen_dataset.val[0][0] == val_dummy\n assert autogen_dataset.val[0][1] == '1'\n assert len(autogen_dataset.val) == 16\n\n assert autogen_dataset.test[0][0] == test_dummy\n assert autogen_dataset.test[0][1] == '6'\n assert len(autogen_dataset.test) == 20", "def test_enforce_exogenous_exo_data(load_uni_exo_data_target):\n data, target = load_uni_exo_data_target\n\n exp1 = TSForecastingExperiment()\n exp1.setup(data=data, target=target, seasonal_period=4, enforce_exogenous=True)\n num_models1 = len(exp1.models())\n\n exp2 = TSForecastingExperiment()\n exp2.setup(data=data, target=target, seasonal_period=4, enforce_exogenous=False)\n num_models2 = len(exp2.models())\n\n # We know that some models do not offer exogenous variables support, so the\n # following check is valid for now.\n assert num_models1 < num_models2", "def load_characteristics(self):\r\n data = self.data\r\n X = data[:, :-1]\r\n Y = data[:, -1]\r\n return X, Y", "def load_eval_dataset(self):\n dict_path = get_eval_data(self.eval_path, self.src_lang, self.tgt_lang)\n\n pairs = []\n not_found_all = 0\n not_found_L1 = 0\n not_found_L2 = 0\n\n # Open the file and check if src and tgt word exists in the vocab\n with open(dict_path, \"r\") as f:\n for _, line in enumerate(f):\n word1, word2 = line.rstrip().split()\n if word1 in self.src_dico and word2 in self.tgt_dico:\n pairs.append((self.src_dico.index(word1), self.tgt_dico.index(word2)))\n else:\n not_found_all += 1\n not_found_L1 += int(word1 not in self.src_dico)\n not_found_L2 += int(word2 not in self.tgt_dico)\n print(\n \"Found %i pairs of words in the dictionary (%i unique). 
\"\n \" %i other pairs contained at least one unknown word \"\n \" (%i in src_lang, %i in tgt_lang)\"\n % (\n len(pairs),\n len(set([x for x, _ in pairs])),\n not_found_all,\n not_found_L1,\n not_found_L2,\n )\n )\n src_ind = [pairs[x][0] for x in range(len(pairs))]\n tgt_ind = [pairs[x][1] for x in range(len(pairs))]\n self.src_ind = np.asarray(src_ind)\n self.tgt_ind = np.asarray(tgt_ind)", "def test_returns_true_for_vowel(self):\n \n self.assertEqual(vowel_check.is_vowel('i'), True)", "def load_data(dataset_str):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\n \"data/corpus/{}/{}.test.index\".format(dataset_str, dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n print(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)\n\n # training nodes are training docs, no initial features\n # print(\"x: \", x)\n # test nodes are training docs, no initial features\n # print(\"tx: \", tx)\n # both labeled and unlabeled training instances are training docs and words\n # print(\"allx: \", allx)\n # training labels are training doc labels\n # print(\"y: \", y)\n # test labels are test doc labels\n # print(\"ty: \", ty)\n # ally are labels for labels for allx, some will not have labels, i.e., all 0\n # print(\"ally: \\n\")\n # for i in ally:\n # if(sum(i) == 0):\n # print(i)\n # graph edge weight is the word co-occurence or doc word frequency\n # no need to build map, directly build csr_matrix\n # print('graph : ', graph)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(\n min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n # print(len(labels))\n\n idx_test = test_idx_range.tolist()\n # print(idx_test)\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask", "def __init__(self,\n x_train,\n y_train,\n train_indices,\n x_test,\n y_test,\n test_indices,\n x_unlabel=None,\n y_unlabel=None,\n unlabel_indices=None,\n y_train_str=None,\n y_test_str=None):\n self._x_train = x_train\n self._train_indices = train_indices\n self._y_train = y_train\n self._x_test = x_test\n 
self._y_test = y_test\n self._test_indices = test_indices\n self._x_unlabel = x_unlabel\n self._y_unlabel = y_unlabel\n self._unlabel_indices = unlabel_indices\n self._y_train_str = y_train_str\n self._y_test_str = y_test_str", "def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def load_data(directory: str, dataset_str: str):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"{}/ind.{}.{}\".format(directory, dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\"{}/ind.{}.test.index\".format(directory, dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return graph, features, y_train, y_val, y_test, train_mask, val_mask, test_mask", "def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "def loadData (x_file=\"../ass1_data/logisticX.csv\", y_file=\"../logisticY.csv\"):\n\n X = np.genfromtxt(x_file, delimiter=',')\n Y = np.genfromtxt(y_file, dtype=int)\n\n return (X, Y)", "def load_uci_regression_dataset(name,\n split_seed,\n train_fraction=0.9,\n data_dir=\"uci_datasets\"):\n path = os.path.join(data_dir,\n _UCI_REGRESSION_FILENAMES[UCIRegressionDatasets(name)])\n data_arr = onp.load(path)\n x, y = data_arr[\"x\"], data_arr[\"y\"]\n\n indices = jax.random.permutation(jax.random.PRNGKey(split_seed), len(x))\n indices = onp.asarray(indices)\n x, y = x[indices], y[indices]\n\n n_train = int(train_fraction * len(x))\n 
x_train, y_train = x[:n_train], y[:n_train]\n x_test, y_test = x[n_train:], y[n_train:]\n\n def normalize_with_stats(arr, arr_mean=None, arr_std=None):\n return (arr - arr_mean) / arr_std\n\n def normalize(arr):\n eps = 1e-6\n arr_mean = arr.mean(axis=0, keepdims=True)\n arr_std = arr.std(axis=0, keepdims=True) + eps\n return normalize_with_stats(arr, arr_mean, arr_std), arr_mean, arr_std\n\n x_train, x_mean, x_std = normalize(x_train)\n y_train, y_mean, y_std = normalize(y_train)\n x_test = normalize_with_stats(x_test, x_mean, x_std)\n y_test = normalize_with_stats(y_test, y_mean, y_std)\n\n data_info = {\"y_scale\": float(y_std)}\n\n return (x_train, y_train), (x_test, y_test), data_info", "def test_dataset_autogen_with_test(autogen_dataset_with_test):\n train_dummy = \"Etiam ligula tortor, dictum eu, placerat eget, venenatis a, magna.\"\n val_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n test_dummy = \"a sollicitudin orci sem eget massa. Suspendisse eleifend. Cras sed\"\n\n assert autogen_dataset_with_test.train[0][0] == train_dummy\n assert autogen_dataset_with_test.train[0][1] == '6'\n assert len(autogen_dataset_with_test.train) == 80\n\n assert autogen_dataset_with_test.val[0][0] == val_dummy\n assert autogen_dataset_with_test.val[0][1] == '6'\n assert len(autogen_dataset_with_test.val) == 20\n\n assert autogen_dataset_with_test.test[0][0] == test_dummy\n assert autogen_dataset_with_test.test[0][1] == '3'\n assert len(autogen_dataset_with_test.test) == 50", "def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def dataset(self):\n if self.X is not None and self.y is not None:\n return self.X, self.y\n\n self.X, self.y = self.get_BOW_from_file(self.labels[0])\n for label in self.labels[1:]:\n X_temp, y_temp = self.get_BOW_from_file(label)\n self.X = np.concatenate((self.X, X_temp))\n self.y = np.concatenate((self.y, y_temp))\n\n return self.X, self.y", "def load_or_generate_data(self) -> None:\n\n # Training set defined as a 5 x 5 square:\n xg1 = np.linspace(-5, 10, 5)\n xg2 = np.linspace(0, 15, 5)\n x = np.zeros((xg1.size * xg2.size, 2))\n for i, x1 in enumerate(xg1):\n for j, x2 in enumerate(xg2):\n x[i + xg1.size * j, :] = [x1, x2]\n\n y = self.branin(x)[:, None]\n self.x, self.y = x, y", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 28),\n testy)", "def elbow_with_synthetic_data():\n delta = 0.1\n slope_2 = 2\n slope_3 = 3\n break_pt = 5\n intercept_2 = 0.0\n line_2 = np.arange(0, break_pt, delta) * slope_2 + intercept_2\n line_3 = (\n np.arange(break_pt, break_pt * 2, delta) * slope_3\n + (slope_2 - slope_3) * break_pt\n )\n x = np.arange(0, break_pt * 2, delta)\n y = np.concatenate((line_2, line_3))\n break_pt = break_pt\n\n return x, y, break_pt", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * 
float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def load_data(filename):\n with open(\"./shopping.csv\", \"r\") as f:\n reader = csv.reader(f)\n next(reader)\n evidence_raw = []\n labels_raw = []\n for row in reader:\n evidence_raw.append(row[:-1])\n labels_raw.append(row[-1])\n evidence = []\n labels = []\n for row1, row2 in zip(evidence_raw, labels_raw):\n evidence.append(oneHotEncode_Evi(row1))\n labels.append(oneHotEncode_labels(row2))\n return (evidence, labels)", "def reading_data(fname,goal):\n \n #Reading of the EEG data\n data = pd.read_csv(fname)\n events_fname = fname.replace('_data','_events')\n labels= pd.read_csv(events_fname)\n\n if goal==\"training\":\n data=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n elif goal==\"testing\":\n labels=labels.drop(['id' ], axis=1)\n else:\n raise SystemExit(\"The goal variable is unknown for the function\")\n\n return data, labels", "def load_data(self):\n\n\t\tboard_data = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'board_data.dat'))\n\t\tcows_data = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'cows_data.dat'))\n\t\tlabels = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'labels.dat'))\n\t\tlabels = labels.reshape((len(labels), 1))\n\n\t\tpermutation = np.random.permutation(len(labels))\n\n\t\treturn (board_data[permutation],\n\t\t cows_data[permutation],\n\t\t labels[permutation])", "def _get_data_for_tests():\n X = np.random.randn(100, input_dim)\n Y = np.random.randn(100, output_dim)\n X_new = np.random.randn(100, input_dim)\n return X, X_new, Y", "def test_import_wine():\n X, y = wine_data()\n\n assert(X.shape[1] == 12)\n assert(len(y.unique().tolist()) == 3)\n assert(X.shape[0] == y.shape[0])", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def test_data():\n global _MEAN # pylint: disable=global-statement\n _np.random.seed(1)\n view = _skdc10.view.OfficialImageClassificationTask()\n permutation = _np.random.permutation(range(10000))\n if _MEAN is None:\n _MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)\n return ((view.test.x[:10000, :][permutation, :] - _MEAN).\n transpose((0, 3, 1, 2)).astype('float32'),\n view.test.y[:10000][permutation].reshape((10000, 1)).astype('float32'))", "def japanese_vowels(\n one_hot_encode=True, repeat_targets=False, data_folder=None, reload=False\n):\n\n data_folder = _get_data_folder(data_folder)\n\n complete = True\n for file_role, file_name in REMOTE_FILES.items():\n if not (data_folder / file_name).exists():\n complete = False\n break\n\n if reload or not complete:\n _download(data_folder)\n\n data_files = {}\n for file_role, file_name in REMOTE_FILES.items():\n\n with open(data_folder / file_name, \"r\") as fp:\n\n if file_role in [\"train_sizes\", \"test_sizes\"]:\n data = fp.read().split(\" \")\n # remove empty characters and spaces\n data = [int(s) for s in filter(lambda s: s not in [\"\", \"\\n\", \" \"],\n data)]\n\n else:\n data = fp.read()\n\n 
data_files[file_role] = data\n\n X_train, Y_train = _format_data(\n data_files[\"train\"], data_files[\"train_sizes\"], one_hot_encode\n )\n\n X_test, Y_test = _format_data(\n data_files[\"test\"], data_files[\"test_sizes\"], one_hot_encode\n )\n\n if repeat_targets:\n Y_train = _repeat_target(X_train, Y_train)\n Y_test = _repeat_target(X_test, Y_test)\n\n return X_train, Y_train, X_test, Y_test", "def test(exe, chunk_evaluator, save_dirname, test_data, place):\n inference_scope = fluid.core.Scope()\n with fluid.scope_guard(inference_scope):\n [inference_program, feed_target_names,\n fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)\n chunk_evaluator.reset()\n for data in test_data():\n word = to_lodtensor(list(map(lambda x: x[0], data)), place)\n target = to_lodtensor(list(map(lambda x: x[1], data)), place)\n result_list = exe.run(inference_program,\n feed={\"word\": word,\n \"target\": target},\n fetch_list=fetch_targets)\n number_infer = np.array(result_list[0])\n number_label = np.array(result_list[1])\n number_correct = np.array(result_list[2])\n chunk_evaluator.update(\n int(number_infer[0]),\n int(number_label[0]), int(number_correct[0]))\n return chunk_evaluator.eval()", "def load_examples():\n X = []\n Y = []\n with open('examples.txt') as fin:\n for i, line in enumerate(fin):\n if line[0].isdigit():\n bias, pos, neg, label = map(float, line.strip().split(','))\n X.append([bias, pos, neg])\n Y.append(label)\n X = np.array(X)\n Y = np.array(Y).reshape(i, 1)\n return X, Y", "def find_words_using_all_vowels():\n pass", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def load_data(test_split=0.1):\n global _data\n random.shuffle(_data)\n idx = int(len(_data) * (1 - test_split))\n x_train, y_train = np.array([d[:4] for d in _data[:idx]]), np.array([name_index[d[4]] for d in _data[:idx]])\n x_test, y_test = np.array([d[:4] for d in _data[idx:]]), np.array([name_index[d[4]] for d in _data[idx:]])\n return (x_train, y_train), (x_test, y_test)", "def find_vowels(s):\n \"*** YOUR CODE HERE ***\"", "def Test_data():\n print (\"loading test data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n\n with h5py.File(join(data_root, './data/test_real2.h5')) as f:\n test_real = f['test_real'][:]\n with h5py.File(join(data_root, './data/test_imag2.h5')) as f:\n test_imag = f['test_imag'][:]\n test_real = np.transpose(test_real, (0, 1, 3, 2))\n test_imag = np.transpose(test_imag, (0, 1, 3, 2))\n test_data = test_real+1j*test_imag\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end - time_start))\n return test_data", "def read_data_test(path):\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('data'))\n label_ = np.array(hf.get('label'))\n\t\n return input_, label_", "def from_coords(self, consonant, vowel):\n return self._table[consonant][self.vowels.index(vowel)]", "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "def convert_int_data(lines):\n tagdict = load('help/tagsets/upenn_tagset.pickle')\n list_tags = list(tagdict.keys()) # Get the list of all the tags.\n X, Y = [], [] # Creation of the array\n for j in range(len(lines)):\n line = lines[j]\n if 
len(line) >= 5: # We want the word in the middle of five words\n index = np.random.random_integers(low=2, high=len(line) - 3) # Take the index of the word to be choosen\n neighbours_words = [line[i] for i in (index - 2, index - 1, index + 1, index + 2)] # Extract the words\n Y.append(one_hot_encoding(lines[j][index], list_tags)) # Append the target to the array\n sample = []\n for word in neighbours_words:\n sample.append(one_hot_encoding(word, list_tags).tolist())\n X.append(sample) # Append the 4 neighbouring words\n\n return np.array(X), np.array(Y)", "def test_xyz_uvw():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n\n assert uvw.shape == (3, 9)\n\n assert uvw[0][0] == 0.0\n assert uvw[1][0] == 0.0\n assert uvw[2][0] == 0.0", "def evaluate_vae(beta, model, loader_test, device):\n model.eval() # switch to evaluation mode\n\n test_loss = 0\n test_kl_loss = 0\n test_rl_loss = 0\n\n for i, data in enumerate(loader_test):\n data, _ = data\n data = data.to(device)\n\n reconstr_batch, mu, logvar = model(data)\n\n test_rl, test_kld, test_beta = loss_function_vae(reconstr_batch, data, mu, logvar, beta)\n\n loss = test_rl + test_beta * test_kld\n\n test_kl_loss += test_kld.item()\n test_rl_loss += test_rl.item()\n test_loss += loss.item()\n\n test_loss /= len(loader_test.dataset)\n test_kl_loss /= len(loader_test.dataset)\n test_rl_loss /= len(loader_test.dataset)\n\n print(f\"====> Test set loss: {test_loss}\")\n\n return test_loss, test_kl_loss, test_rl_loss", "def learnDataset(self, data_loader):\n\n print(\"learning dataset\")\n # we have 127940 sentences in total\n count = 0\n for sample in data_loader:\n input_sentence = sample[\"input\"][0]\n target_sentence = sample[\"target\"][0]\n\n # NOTE: target_word & input_word are actually indecies of words, instead of word strings\n # NOTE: the first word has index 1\n first_target = int(target_sentence[1])\n first_input = int(input_sentence[1])\n\n self.emiss_factors[0][(first_input, first_target)] += 1\n\n prev_target = first_target\n for word_idx in range(2, 16):\n # note that word_idx is 0 is always <BOS>\n target_word = int(target_sentence[word_idx])\n input_word = int(input_sentence[word_idx])\n\n self.emiss_factors[word_idx - 1][(input_word, target_word)] += 1\n self.trans_factors[word_idx - 2][(prev_target, target_word)] += 1\n prev_target = target_word\n\n print(\"{}/127940\".format(count), end = \"\\r\")\n count += 1\n print(\"127940/127940\")\n\n # all data updated, no need to do any insertion\n for i in range(15):\n self.emiss_factors[i].fixed()\n for i in range(14):\n self.trans_factors[i].fixed()", "def parse_test_data(test_set, training_output, language):\n print \"Reading test set: \" + test_set\n xmldoc = minidom.parse(test_set)\n data = {}\n lex_list = xmldoc.getElementsByTagName('lexelt')\n for node in lex_list:\n lexelt = node.getAttribute('item') # item \"active.v\"\n data[lexelt] = []\n inst_list = node.getElementsByTagName('instance')\n for inst in inst_list:\n instance_id = inst.getAttribute('id') # id \"activate.v.bnc.00024693\"\n neighbor_word_list = training_output[lexelt][\"neighbor_word_list\"]\n _4c_4d_feature = training_output[lexelt][\"4c_4d_feature\"]\n x = extract_vector(inst, neighbor_word_list, _4c_4d_feature, language)\n data[lexelt].append((instance_id, x))\n\n return data", "def test(ndigit, elambda, showSamples, showConfusion):\n Data, Label = 
getData()\n trainX, trainY, testX, testY = splitData(Data, Label, ndigit)\n trainX_mean = np.mean(trainX, axis=0)\n trainX_new = trainX - trainX_mean\n eigenvectors = getEigenVectors(trainX_new, elambda)\n trainX_eigen = trainX_new.dot(eigenvectors)\n testX_new = testX - trainX_mean\n testX_eigen = testX_new.dot(eigenvectors)\n testO = []\n if showSamples:\n correct_samples = []\n correct_samples_nearest = []\n correct_samples_eigen = []\n correct_samples_nearest_eigen = []\n correct_samples_labels = []\n correct_samples_predictions = []\n wrong_samples = []\n wrong_samples_nearest = []\n wrong_samples_eigen = []\n wrong_samples_nearest_eigen = []\n wrong_samples_labels = []\n wrong_samples_predictions = []\n if showConfusion:\n conf = np.zeros((ndigit, ndigit))\n for i in xrange(testX_eigen.shape[0]):\n t = testX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n p = int(trainY[j])\n y = int(testY[i])\n if showConfusion:\n conf[p, y] += 1\n if showSamples:\n if p == y:\n if len(correct_samples) < y + 1:\n correct_samples.append(testX[i])\n correct_samples_nearest.append(trainX[j])\n correct_samples_eigen.append(testX_eigen[i])\n correct_samples_nearest_eigen.append(trainX_eigen[j])\n correct_samples_labels.append(y)\n correct_samples_predictions.append(p)\n else:\n if len(wrong_samples) < y + 1:\n wrong_samples.append(testX[i])\n wrong_samples_nearest.append(trainX[j])\n wrong_samples_eigen.append(testX_eigen[i])\n wrong_samples_nearest_eigen.append(trainX_eigen[j])\n wrong_samples_labels.append(y)\n wrong_samples_predictions.append(p)\n testO.append(p)\n testO = np.array(testO)\n train0 = []\n for i in xrange(trainX_eigen.shape[0]):\n t = trainX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n min_class = trainY[j]\n train0.append(min_class)\n train0 = np.array(train0)\n print \"for digits = %d lambda = %.2f train = %.6f test = %.6f \" % (\n ndigit, elambda, (train0 == trainY).mean(), (testO == testY).mean())\n if showConfusion:\n print conf\n if showSamples:\n displaySamples(correct_samples_labels, correct_samples_predictions,\n correct_samples, correct_samples_nearest,\n correct_samples_eigen, correct_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Correct')\n displaySamples(wrong_samples_labels, wrong_samples_predictions,\n wrong_samples, wrong_samples_nearest,\n wrong_samples_eigen, wrong_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Wrong')", "def iris_data():\n X, y = load_iris()['data'], load_iris()['target']\n y[y == 2.] = 0 # N.B. 
make binary, TODO simulate a competition dataset\n return BasicExamplesProvider(X, y)", "def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features", "def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def load_data_val(self, size, a_low, a_high=None):\n data, label = self._generate_test_set(size, a_low, a_high, \n flip_structure=True);\n\n \n return data, label;", "def _read_vee(filename):\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split()\n if len(line) <= 2:\n size1, size2 = int(line[0]), int(line[1])\n vee = NP.zeros((size1, size1, size2, size2), dtype=NP.float64)\n elif len(line) == 5:\n mu, nu, lmda, sgma, val = int(line[0]) - 1, int(line[1]) - 1, int(line[2]) - 1, int(line[3]) - 1, NP.float64(line[4])\n vee[mu,nu,lmda,sgma] = \\\n vee[nu,mu,lmda,sgma] = \\\n vee[mu,nu,sgma,lmda] = \\\n vee[nu,mu,sgma,lmda] = \\\n vee[lmda,sgma,mu,nu] = \\\n vee[sgma,lmda,mu,nu] = \\\n vee[lmda,sgma,nu,mu] = \\\n vee[sgma,lmda,nu,mu] = \\\n val\n return vee", "def _get_training_data(self) -> tuple:\n\n training_data = self._data.loc[self._data.target == 'train'].drop('target', axis=1)\n y = training_data.y_label.to_numpy()\n X = training_data.drop('y_label', axis=1).to_numpy()\n\n return X, y", "def get_data():\n\n pathxtrain = sys.argv[1]\n pathxtest = sys.argv[2]\n pathlabeltrain = sys.argv[3]\n pathlabeltest = sys.argv[4]\n\n xtrain = p.read_csv(pathxtrain, header=None)\n xtest = p.read_csv(pathxtest, header=None)\n label_train = p.read_csv(pathlabeltrain, header=None)\n label_test = p.read_csv(pathlabeltest, header=None)\n\n xtrain_mx = xtrain.values\n xtest_mx = xtest.values\n\n label_train = 
label_train.values.reshape(label_train.shape[0])\n label_test = label_test.values.reshape(label_test.shape[0])\n\n return xtrain_mx, xtest_mx, label_train, label_test", "def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def generate_data(dataset, target_filename, label):\n\n data_dir = check_data(dataset)\n\n data_x = np.empty((0, NB_SENSOR_CHANNELS))\n data_y = np.empty((0))\n\n zf = zipfile.ZipFile(dataset)\n print (\"Processing dataset files ...\")\n for filename in OPPORTUNITY_DATA_FILES:\n try:\n data = np.loadtxt(BytesIO(zf.read(filename)))\n print (\"... file {0}\".format(filename))\n x, y = process_dataset_file(data, label)\n data_x = np.vstack((data_x, x))\n data_y = np.concatenate([data_y, y])\n except KeyError:\n print (\"ERROR: Did not find {0} in zip file\".format(filename))\n\n # Dataset is divided into train and test\n nb_training_samples = 557963\n # The first 18 OPPORTUNITY data files are used for the traning dataset, having 557963 samples\n X_train, y_train = data_x[:nb_training_samples,:], data_y[:nb_training_samples]\n X_test, y_test = data_x[nb_training_samples:,:], data_y[nb_training_samples:]\n\n print (\"Final datasets with size: | train {0} | test {1} | \".format(X_train.shape,X_test.shape))\n\n obj = [(X_train, y_train), (X_test, y_test)]\n f = open(os.path.join(data_dir, target_filename), 'wb')\n cp.dump(obj, f, protocol=cp.HIGHEST_PROTOCOL)\n f.close()", "def testdata_matcher(fname1='easy1.png', fname2='easy2.png'):\n import utool as ut\n #import vtool as vt\n from vtool import image as gtool\n from vtool import features as feattool\n fpath1 = ut.grab_test_imgpath(fname1)\n fpath2 = ut.grab_test_imgpath(fname2)\n featkw = dict(rotation_invariance=True)\n kpts1, vecs1 = feattool.extract_features(fpath1, **featkw)\n kpts2, vecs2 = feattool.extract_features(fpath2, **featkw)\n #if featkw['rotation_invariance']:\n # print('ori stats 1 ' + ut.get_stats_str(vt.get_oris(kpts2)))\n # print('ori stats 2 ' + ut.get_stats_str(vt.get_oris(kpts1)))\n rchip1 = gtool.imread(fpath1)\n rchip2 = gtool.imread(fpath2)\n #chip1_shape = vt.gtool.open_image_size(fpath1)\n chip2_shape = gtool.open_image_size(fpath2)\n dlen_sqrd2 = chip2_shape[0] ** 2 + chip2_shape[1]\n testtup = (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2)\n return testtup", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype = str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0' \n y[y==label1]='1' \n y[y==label2]='2'\n y.astype(np.float) \n return X, y", "def load_test_data(self, file_):\n logging.info('loading data from %s' % file_)\n\n true_edge_data_by_type = {}\n fake_edge_data_by_type = {}\n with open(file_, 'r') as reader:\n for line in reader:\n words = line.strip().split(' 
')\n src, dst = self.word2index[words[1]], self.word2index[words[2]]\n e_type = words[0]\n if int(words[3]) == 1: # true edges\n if e_type not in true_edge_data_by_type:\n true_edge_data_by_type[e_type] = list()\n true_edge_data_by_type[e_type].append((src, dst))\n else: # fake edges\n if e_type not in fake_edge_data_by_type:\n fake_edge_data_by_type[e_type] = list()\n fake_edge_data_by_type[e_type].append((src, dst))\n\n return (true_edge_data_by_type, fake_edge_data_by_type)", "def fixture_coord():\n\tEXAMPLE_FILE_FOLDER = str(MODULE_DIR) + \"/data/nail1/\"\n\tcoord_x, coord_y, coord = read.load_coord(EXAMPLE_FILE_FOLDER)\n\treturn coord", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype=str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0'\n y[y==label1]='1'\n y[y==label2]='2'\n y=y.astype(np.float)\n return X, y", "def download_data(dev_mode: str, model: word2vec.Word2Vec) -> (np.ndarray, np.ndarray):\n assert dev_mode.lower() == 'false' or dev_mode.lower() == 'true'\n \n if dev_mode.lower() == 'false':\n print('Using Actual Data...')\n data_path = os.path.join(args.data_dir, 'HIV.csv')\n df = pd.read_csv(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(Chem.MolFromSmiles(x['smiles']), 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['HIV_active'].astype(int))\n else:\n # use example data set\n data_path = os.path.join(args.data_dir, 'ames.sdf')\n df = PandasTools.LoadSDF(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(x['ROMol'], 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['class'].astype(int))\n \n return X,y", "def get_data(dataset: str = None, scale: bool = True) -> tuple:\n \n if dataset not in ['ecoli', 'glass', 'letter-recognition', 'lymphography', 'yeast', \n 'digits', 'breast-cancer', 'wine', 'mnist']:\n raise ValueError(\"Invalid dataset provided.\")\n \n if dataset in dataset in ['ecoli', 'glass', 'letter-recognition', 'lymphography', 'yeast']:\n path = 'https://archive.ics.uci.edu/ml/machine-learning-databases/' \n f = path + dataset + \"/\" + dataset + \".data\"\n \n if dataset in ['ecoli', 'yeast']:\n df = pd.read_table(f, delim_whitespace=True, header=None)\n elif dataset in [ 'glass', 'letter-recognition', 'lymphography']:\n df = pd.read_csv(f, header=None)\n elif dataset == 'digits':\n df = load_digits()\n X = df.data\n y = df.target\n elif dataset == 'breast-cancer':\n df = load_breast_cancer()\n X = df.data\n y = df.target\n elif dataset == 'wine':\n df = load_wine()\n X = df.data\n y = df.target\n \n if dataset == 'ecoli':\n y = preprocessing.LabelEncoder().fit_transform(df.iloc[:,-1])\n X = df.iloc[:,1:8].values\n \n elif dataset == 'glass':\n y = df.iloc[:,-1].values\n X = df.iloc[:, 1:(df.shape[1]-1)].values\n \n elif dataset in ['letter-recognition', 'lymphography']:\n y = preprocessing.LabelEncoder().fit_transform(df.iloc[:,0])\n X = df.iloc[:, 1:(df.shape[1])].values\n \n elif dataset == 'yeast':\n y = preprocessing.LabelEncoder().fit_transform(df.iloc[:,-1])\n X = df.iloc[:,1:9].values\n \n elif dataset == 'mnist':\n X, y = fetch_openml('mnist_784', version=1, return_X_y=True)\n y = y.astype('int64') \n\n 
if scale==True:\n scaler = preprocessing.StandardScaler()\n X = scaler.fit_transform(X)\n \n return X, y", "def get_inputs_test():\n x = tf.constant(extract_pandas_data(x_test))\n y = tf.constant(y_test.values)\n return x, y", "def loadTestData():\n path = raw_input(\"Enter the path of Test Data: \")\n data = np.genfromtxt(path, delimiter=',', dtype=int)\n\n labels = data[:, -1]\n\n unwantedLabels = [4, 5, 6, 7, 8, 9]\n listToDelete = []\n for i, line in enumerate(range(len(data))):\n if labels[i] in unwantedLabels:\n listToDelete.append(i)\n\n actualData = np.delete(data, listToDelete, axis=0)\n\n # print(actualData.shape)\n # Separating the labels and data into different arrays\n actualLabels = actualData[:, -1]\n actualData = actualData[:, :-1]\n\n actualData = pre.scale(actualData)\n\n # Change the label vector to label matrix\n # If Label is 2 then it becomes [0, 1, 0]\n labelMatrix = np.zeros((actualLabels.shape[0], 4))\n for j in range(len(actualLabels)):\n if actualLabels[j] == 0:\n labelMatrix[j][0] = 1\n if actualLabels[j] == 1:\n labelMatrix[j][1] = 1\n if actualLabels[j] == 2:\n labelMatrix[j][2] = 1\n if actualLabels[j] == 3:\n labelMatrix[j][3] = 1\n\n return actualData, actualLabels", "def _read_txt(self, expected_col_names):\n\n try:\n # Read data\n data = pd.read_csv(self.source)\n\n # Check number of columns\n if data.shape[1] != len(expected_col_names):\n raise ValueError(\n \"Unexpected number of columns. Expected {}.\".format(\n len(expected_col_names)))\n # Check column names\n for item in data.columns:\n if item not in expected_col_names:\n raise ValueError(\"Unexpected column name. Expected:{}\"\\\n .format(expected_col_names))\n\n # Convert data\n for column in data.columns:\n data[column] = pd.to_numeric(data[column])\n\n # Generate output\n if self.coordinate_system == CoordinateSystem.GEOGRAPHIC:\n def generate_utm(row):\n return UtmCoordinate.create_from_geographic(\n row['latitude'],\n row['longitude'],\n row['elevation'])\n data['UTM'] = data.apply(generate_utm, axis=1)\n data['easting'] = data.apply(lambda row: row['UTM'].easting,\n axis=1)\n data['northing'] = data.apply(lambda row: row['UTM'].northing,\n axis=1)\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.UTM:\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.CARTESIAN:\n data['elevation'] = data['z'] # keeping return values consitent\n data['z'] = data['elevation'] - data['elevation'].min()\n\n else:\n raise ValueError('Unknown coordinate system.')\n\n selection = ['x', 'y', 'z', 'elevation']\n return data[selection]\n except Exception as exception:\n raise exception", "def get_var(self, X_test: Array, y_pred: Array, uncertainty: str = \"full\") -> Array:\n valid_uncert = [\"epistemic\", \"aleatoric\", \"full\"]\n assert (\n uncertainty in valid_uncert\n ), f\"uncertainty must be one of {valid_uncert}, got {uncertainty}\"\n\n # trees is a list of fitted binary decision trees.\n trees = self.estimators_\n y_var_epist, y_var_aleat = np.zeros([2, len(X_test)])\n\n for tree in trees:\n # We use tree impurity as a proxy for aleatoric uncertainty.\n # Doesn't work well in experiments though.\n # leaf indices that each sample is predicted as.\n leaf_idx = 
tree.apply(X_test)\n # Grab the impurity of assigned leafs.\n y_var_tree = tree.tree_.impurity[leaf_idx]\n y_var_aleat += y_var_tree\n\n y_pred_tree = tree.predict(X_test)\n y_var_epist += y_pred_tree**2\n\n y_var_aleat /= len(trees)\n y_var_epist /= len(trees)\n y_var_epist -= y_pred**2\n\n if uncertainty == \"aleatoric\":\n return y_var_aleat\n\n if uncertainty == \"epistemic\":\n return y_var_epist\n\n y_var = y_var_epist + y_var_aleat\n return y_var", "def load_data():\n # Load and preprocess data\n x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev = load_data_and_labels_without_shuffled()\n\n x_text_train1 = split_sentence(x_text_train1)\n x_text_train2 = split_sentence(x_text_train2)\n x_text_dev1 = split_sentence(x_text_dev1)\n x_text_dev2 = split_sentence(x_text_dev2)\n\n x_text_train1 = pad_sentences(x_text_train1)\n x_text_train2 = pad_sentences(x_text_train2)\n x_text_dev1 = pad_sentences(x_text_dev1)\n x_text_dev2 = pad_sentences(x_text_dev2)\n\n # sentences = x_text_train1 + x_text_train2 + x_text_dev1 + x_text_dev2\n # vocabulary, vocabulary_inv = build_vocab(sentences)\n # x_text_train1 = build_input_data(x_text_train1, vocabulary)\n # x_text_train2 = build_input_data(x_text_train2, vocabulary)\n # x_text_dev1 = build_input_data(x_text_dev1, vocabulary)\n # x_text_dev2 = build_input_data(x_text_dev2, vocabulary)\n\n x_train1 = sentence_word2vec(x_text_train1)\n x_train2 = sentence_word2vec(x_text_train2)\n x_dev1 = sentence_word2vec(x_text_dev1)\n x_dev2 = sentence_word2vec(x_text_dev2)\n\n y_train = np.array(y_train)\n y_dev = np.array(y_dev)\n # return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev, vocabulary, vocabulary_inv]\n\n return [x_train1, x_train2, x_dev1, x_dev2, y_train, y_dev]", "def eye():\n eye_timestamps = read_npy_file('eye.timestamps.npy')[:, 1] # takes out index\n eye_area = read_npy_file('eye.area.npy')\n eye_xy_pos = read_npy_file('eye.xyPos.npy')\n pupil = TimeSeries(\n name='eye_area',\n timestamps=eye_timestamps,\n data=np.ravel(eye_area),\n unit='arb. unit',\n description='Features extracted from the video of the right eye.',\n comments='The area of the pupil extracted with DeepLabCut. Note that '\n 'it is relatively very small during the discrimination task '\n 'and during the passive replay because the three screens are '\n 'medium-grey at this time and black elsewhere - so the much '\n 'brighter overall luminance levels lead to relatively '\n 'constricted pupils.'\n )\n eye_xy = TimeSeries(\n name='eye_xy_positions',\n timestamps=eye_timestamps,\n data=eye_xy_pos, # currently as [x, y] pairs\n unit='arb. unit',\n description='Features extracted from the video of the right eye.',\n comments='The 2D position of the center of the pupil in the video '\n 'frame. 
This is not registered to degrees visual angle, but '\n 'could be used to detect saccades or other changes in eye position.'\n )\n pupil_track = PupilTracking(pupil)\n pupil_track.add_timeseries(eye_xy)\n behavior_module.add_data_interface(pupil_track)", "def test_output(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.Fu\"]:\n params = tuple(finput.values())\n\n self_1, x_1, u_1 = deepcopy(params)\n\n self_2, x_2, u_2 = deepcopy(params)\n\n Fu_1 = EKFSLAM.EKFSLAM.Fu(self_1, x_1, u_1)\n\n Fu_2 = solution.EKFSLAM.EKFSLAM.Fu(self_2, x_2, u_2)\n \n assert compare(Fu_1, Fu_2)\n \n assert compare(self_1, self_2)\n assert compare(x_1, x_2)\n assert compare(u_1, u_2)", "def test_dataset_from_file_reversed(train_dataset_reversed):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. Proin sed\"\n assert train_dataset_reversed[0][0] == '6'\n assert train_dataset_reversed[0][1] == dummy", "def load_own_target():\n data = Data()\n target = data.get_label_col()\n return target.values", "def load_data(data_path=DATA_PATH):\n with open (os.path.join(DATA_PATH, \"imdb_extrait.pkl\"),\"rb\") as file:\n \n [data , id2titles , fields ]= pk.load(file)\n \n \n datax = data [: ,:33]\n datay = np.array([1 if x [33] >6.5 else -1 for x in data ])\n \n return datax, datay, id2titles, fields", "def select_eye(data, use_eye):\r\n \r\n if (use_eye == 'left') & ('pup_l' in data.columns):\r\n data[['x','y','pup']] = data[['x_l','y_l','pup_l']]\r\n elif (use_eye == 'right') & ('pup_r' in data.columns):\r\n data[['x','y','pup']] = data[['x_r','y_r','pup_r']]\r\n\t# when the eye requested is not available\r\n elif use_eye == 'left':\r\n data[['x','y','pup']] = data[['x_r','y_r','pup_r']]\r\n warnings.warn(\"Left eye not available. Right eye is used.\")\r\n elif use_eye == 'right':\r\n data[['x','y','pup']] = data[['x_l','y_l','pup_l']]\r\n warnings.warn(\"Right eye not available. 
Left eye is used.\")\r\n return data[['x','y','pup']]", "def test(self, test_data):\n with open(test_data, 'r') as test_data:\n results = {}\n for type in self.label_type_map:\n results[self.label_type_map[type]] = []\n while True:\n tokens = test_data.readline().split()\n pos = test_data.readline().split()\n indices = test_data.readline().split()\n if not tokens or not pos or not indices:\n break\n curr_results = self.viterbi(tokens)\n intervals = self.extract_intervals(curr_results, indices)\n for type in intervals:\n for interval in intervals[type]:\n results[type].append(interval)\n self.write_results(results)", "def test_train_test_split_uni_no_exo(load_pos_and_neg_data):\n data = load_pos_and_neg_data\n\n ####################################\n #### Continuous fh without Gaps ####\n ####################################\n\n #### Integer fh ----\n exp = TSForecastingExperiment()\n fh = 12\n exp.setup(data=data, fh=fh, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.test.index == data.iloc[-fh:].index)\n assert exp.X is None\n assert np.all(exp.y.index == data.index)\n assert exp.X_train is None\n assert exp.X_test is None\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.y_test.index == data.iloc[-fh:].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(exp.train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.test_transformed.index == data.iloc[-fh:].index)\n assert exp.X_transformed is None\n assert np.all(exp.y_transformed.index == data.index)\n assert exp.X_train_transformed is None\n assert exp.X_test_transformed is None\n assert np.all(exp.y_train_transformed.index == data.iloc[: (len(data) - fh)].index)\n assert np.all(exp.y_test_transformed.index == data.iloc[-fh:].index)\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.arange(1, 10) # 9 values\n exp.setup(data=data, fh=fh, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.test.index == data.iloc[-len(fh) :].index)\n assert exp.X is None\n assert np.all(exp.y.index == data.index)\n assert exp.X_train is None\n assert exp.X_test is None\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.y_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.test_transformed.index == data.iloc[-len(fh) :].index)\n assert exp.X_transformed is None\n assert np.all(exp.y_transformed.index == data.index)\n assert exp.X_train_transformed is None\n assert exp.X_test_transformed is None\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.y_test_transformed.index == data.iloc[-len(fh) :].index)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [1, 2, 3, 4, 5, 6]\n exp.setup(data=data, fh=fh, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.test.index == data.iloc[-len(fh) :].index)\n assert exp.X is None\n assert np.all(exp.y.index == data.index)\n assert exp.X_train is None\n assert exp.X_test is None\n assert 
np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert np.all(exp.y_test.index == data.iloc[-len(fh) :].index)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.test_transformed.index == data.iloc[-len(fh) :].index)\n assert exp.X_transformed is None\n assert np.all(exp.y_transformed.index == data.index)\n assert exp.X_train_transformed is None\n assert exp.X_test_transformed is None\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert np.all(exp.y_test_transformed.index == data.iloc[-len(fh) :].index)\n\n #################################\n #### Continuous fh with Gaps ####\n #################################\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.arange(7, 13) # 6 values\n exp.setup(data=data, fh=fh, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.test) == len(fh)\n assert exp.X is None\n assert np.all(exp.y.index == data.index)\n assert exp.X_train is None\n assert exp.X_test is None\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert exp.X_transformed is None\n assert np.all(exp.y_transformed.index == data.index)\n assert exp.X_train_transformed is None\n assert exp.X_test_transformed is None\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [4, 5, 6]\n exp.setup(data=data, fh=fh, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.test) == len(fh)\n assert exp.X is None\n assert np.all(exp.y.index == data.index)\n assert exp.X_train is None\n assert exp.X_test is None\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert exp.X_transformed is None\n assert np.all(exp.y_transformed.index == data.index)\n assert exp.X_train_transformed is None\n assert exp.X_test_transformed is None\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)\n\n ####################################\n #### Discontinuous fh with Gaps ####\n ####################################\n\n #### Numpy fh ----\n exp = TSForecastingExperiment()\n fh = np.array([4, 5, 6, 10, 11, 12]) # 6 values\n exp.setup(data=data, fh=fh, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.test) == len(fh)\n assert exp.X is None\n assert np.all(exp.y.index == data.index)\n assert exp.X_train is None\n assert exp.X_test is None\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert 
len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert exp.X_transformed is None\n assert np.all(exp.y_transformed.index == data.index)\n assert exp.X_train_transformed is None\n assert exp.X_test_transformed is None\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)\n\n #### List fh ----\n exp = TSForecastingExperiment()\n fh = [4, 5, 6, 10, 11, 12]\n exp.setup(data=data, fh=fh, session_id=42)\n assert np.all(exp.dataset.index == data.index)\n assert np.all(exp.train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.test) == len(fh)\n assert exp.X is None\n assert np.all(exp.y.index == data.index)\n assert exp.X_train is None\n assert exp.X_test is None\n assert np.all(exp.y_train.index == data.iloc[: (len(data) - max(fh))].index)\n assert len(exp.y_test) == len(fh)\n assert np.all(exp.dataset_transformed.index == data.index)\n assert np.all(\n exp.train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.test_transformed) == len(fh)\n assert exp.X_transformed is None\n assert np.all(exp.y_transformed.index == data.index)\n assert exp.X_train_transformed is None\n assert exp.X_test_transformed is None\n assert np.all(\n exp.y_train_transformed.index == data.iloc[: (len(data) - max(fh))].index\n )\n assert len(exp.y_test_transformed) == len(fh)", "def _loadTest(self, features, labels):\n\t\tself.testX_, self.testY_, self.testLabel_ = self.__load(features, labels)", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load_data_test_false(self, size, a_low, a_high=None):\n if a_high is None:\n a_high = self.a;\n\n data, label = self._generate_test_set(size, a_low, a_high, \n flip_structure=True);\n\n return data, label;", "def load_or_generate_data(self) -> None:\n x = np.linspace(0, 10, self.n_samples).reshape(-1, 1)\n y_sin = np.sin(x * 1.5)\n noise = np.random.randn(*x.shape)\n y = (y_sin + noise).reshape(x.shape[0], 1)\n self.x, self.y = x, y" ]
[ "0.8300088", "0.6068408", "0.5904717", "0.5585258", "0.55028343", "0.54649633", "0.54600555", "0.5418553", "0.5395617", "0.53787655", "0.53458136", "0.5342534", "0.52974266", "0.5277391", "0.526091", "0.52232075", "0.52189106", "0.5207462", "0.5204022", "0.5163287", "0.51475626", "0.51368123", "0.5135174", "0.51095444", "0.51079", "0.5099455", "0.5097614", "0.50904113", "0.50838804", "0.508168", "0.50790995", "0.50770473", "0.5072696", "0.5049476", "0.50377476", "0.5025237", "0.5022605", "0.50170076", "0.5016304", "0.5001806", "0.5001428", "0.4993024", "0.49790746", "0.49730867", "0.4966269", "0.49661562", "0.4960742", "0.4959658", "0.49564517", "0.49471807", "0.49440694", "0.4928253", "0.49266782", "0.49137273", "0.49091607", "0.49069992", "0.48973596", "0.48944667", "0.48903713", "0.48853585", "0.48813772", "0.488098", "0.48805508", "0.48801842", "0.48801473", "0.4876759", "0.4867863", "0.48654452", "0.4863626", "0.48583433", "0.48530155", "0.48484275", "0.4847342", "0.48471445", "0.48425865", "0.48233747", "0.48218885", "0.4815594", "0.48016357", "0.48007077", "0.47861063", "0.47775534", "0.4774087", "0.47725445", "0.47686055", "0.47672397", "0.47615293", "0.47599083", "0.47554237", "0.4751795", "0.47517124", "0.47457767", "0.4743876", "0.47349638", "0.47314462", "0.47292882", "0.47258043", "0.47231194", "0.47230178", "0.47225246" ]
0.83285666
0
Load and return the breast cancer Wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. Returns (X_train, X_test, y_train, y_test): a tuple of data and target. The copy of the UCI ML Breast Cancer Wisconsin (Original) dataset is
def load_breast_cancer():
    bc_data_train = np.load(_BREAST_CANCER_FOLDER+'bc_data.train')
    bc_data_test = np.load(_BREAST_CANCER_FOLDER+'bc_data.test')
    bc_target_train = np.load(_BREAST_CANCER_FOLDER+'bc_target.train')
    bc_target_test = np.load(_BREAST_CANCER_FOLDER+'bc_target.test')

    for i in range(len(bc_target_test)):
        if bc_target_test[i] == 2:
            bc_target_test[i] = 0
        elif bc_target_test[i] == 4:
            bc_target_test[i] = 1

    for i in range(len(bc_target_train)):
        if bc_target_train[i] == 2:
            bc_target_train[i] = 0
        elif bc_target_train[i] == 4:
            bc_target_train[i] = 1

    return (bc_data_train, bc_target_train.reshape(-1, 1),
            bc_data_test, bc_target_test.reshape(-1, 1))
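A minimal usage sketch for the loader above. The folder value below is an assumption (the `_BREAST_CANCER_FOLDER` constant is referenced but not defined in the snippet), and the unpacking follows the order actually returned by the code body, i.e. (data_train, target_train, data_test, target_test), not the order stated in the description:

import numpy as np

# Assumed location of the saved NumPy arrays; adjust to wherever
# bc_data.train / bc_data.test / bc_target.train / bc_target.test live.
_BREAST_CANCER_FOLDER = './breast_cancer/'

# load_breast_cancer() as defined above must be in scope.
X_train, y_train, X_test, y_test = load_breast_cancer()

# Targets are remapped from the UCI coding (2 = benign, 4 = malignant) to {0, 1}
# and reshaped into column vectors by the loader.
print(X_train.shape, y_train.shape)
print(np.unique(y_train))  # expected: [0 1]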
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_breast_cancer():\n data = load_breast_cancer_sk()\n X = pd.DataFrame(data.data, columns=data.feature_names)\n y = pd.Series(data.target)\n y = y.map(lambda x: data[\"target_names\"][x])\n\n X.ww.init()\n y = ww.init_series(y)\n return X, y", "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "def load_demo():\n\tprint(\"\"\"\n\tBreast Cancer Wisconsin dataset. It contains a total of 569 samples of tumor and malignant cells. \n\tData labeled 1 corresponds to malignant cells, while data labeled 0 corresponds to benign cells. \n\tThe 30 characteristics contain real values obtained from images of cell nuclei. For more information:\n\n\t\t\thttp://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+(diagnostic)\n\n\n\tThe returned value is a dictionary where 'x_data' are the predictor variables, 'y_data' the class \n\tlabels and 'features' the name of the characteristics.\n\t\"\"\")\n\tpath = '/'.join(os.path.abspath(pywinEA.__file__).split('/')[:-1])\n\t\n\tdata = pd.read_csv(path+'/dataset/data/BreastCancerWisconsin.csv', index_col=0)\n\tx_data = data.iloc[:, 1:].values\n\ty_data = data.iloc[:, 0].values\n\tfeatures = data.columns[1:].values\n\n\t# Transform labels\n\ty_data[np.where(y_data == 'M')] = 1\n\ty_data[np.where(y_data == 'B')] = 0\n\ty_data = y_data.astype(int)\n\n\treturn {'x_data': x_data, 'y_data': y_data, 'features': features}", "def load_benzene_concentration_sample():\n file = Path(__file__).parent.parent / \"data/benzene_concentration_sample.csv\"\n df = pd.read_csv(file)\n y = df[\"target\"].to_numpy()\n X = df.drop(columns=\"target\").to_numpy()\n X = np.expand_dims(X, axis=1)\n return X, y", "def _breast_cancer_wisconsin_diag(location: str) -> Dataset:\n\n columns = ['id', 'target']\n data_type = ['radius', 'texture', 'perimeter', 'area', 'smoothness',\n 'compactness', 'concavity', 'concave_points', 'symmetry',\n 'fractal_dimension']\n\n # Compute proper names of the columns\n for prefix in ['mean', 'sd', 'worst']:\n for dtype in data_type:\n columns.append(f'{prefix}_{dtype}')\n\n # Read dataframe and split data into x, y\n df = pd.read_csv(joinpath(location, 'wdbc.data'), names=columns)\n y, x = df.pop('target'), df\n\n # Convert target into [0, 1]\n conv_to_class = {'M': 0, 'B': 1}\n y = y.apply(lambda x: conv_to_class[x])\n\n return x, y", "def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(join(module_path, 'data', 'train2.csv')) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tglobal n_samples\n\t\tn_samples = int(temp[0])\n\t\tglobal n_features\n\t\tn_features = int(temp[1])\n\t\tprint \"n samples \" + str((n_samples))\n\t\tprint \"n_features\" + str((n_features))\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tdata[count] = np.asarray(value[:-1], dtype=np.float)\n\t\t\ttarget[count] = np.asarray(value[-1], dtype=np.int)\n\t\t\t#print \"data is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\t\tprint \"Number of target records is \" + str(len(target))\n\t#with open(join(module_path, 'descr', 'train.rst')) as rst_file:\n\t#\tfdescr = 
rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=None,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])", "def dataset(self):\n if self.X is not None and self.y is not None:\n return self.X, self.y\n\n self.X, self.y = self.get_BOW_from_file(self.labels[0])\n for label in self.labels[1:]:\n X_temp, y_temp = self.get_BOW_from_file(label)\n self.X = np.concatenate((self.X, X_temp))\n self.y = np.concatenate((self.y, y_temp))\n\n return self.X, self.y", "def load_bottleneck_data(training_file, validation_file):\n print(\"Training file\", training_file)\n print(\"Validation file\", validation_file)\n\n with open(training_file, 'rb') as f:\n train_data = pickle.load(f)\n with open(validation_file, 'rb') as f:\n validation_data = pickle.load(f)\n\n X_train = train_data['features']\n y_train = train_data['labels']\n X_val = validation_data['features']\n y_val = validation_data['labels']\n\n return X_train, y_train, X_val, y_val", "def get_breast_cancer_data(target=\"diagnosis\"):\n data = load_breast_cancer()\n df = pd.DataFrame(data=data.data, columns=[_.replace(\" \", \"_\") for _ in data.feature_names])\n df[target] = data.target\n return df", "def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)", "def load_data(\n self, file_path: str = os.path.join(os.getcwd(), \"data_breast_cancer.p\")\n ) -> None:\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n self.x_train, self.y_train = data[\"x_train\"], data[\"y_train\"]\n self.x_test, self.y_test = data[\"x_test\"], data[\"y_test\"]", "def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def load_data():\r\n train_dataset = h5py.File('train_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n train_set_x_orig 
= np.array(train_dataset[\"train_set_x\"][:]) # your train set features\r\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\r\n\r\n test_dataset = h5py.File('test_catvnoncat.h5', \"r\") # Change the directory as per your system\r\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\r\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\r\n\r\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\r\n \r\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\r\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\r\n \r\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def load_wdbc(random_state=None, return_X_y=False, subset='kriegel11'):\n\n wdbc = load_breast_cancer()\n X = wdbc.data\n y = wdbc.target\n feature_names = wdbc.feature_names\n\n n_outliers = 10\n is_outlier = y == 0\n idx_inlier = np.flatnonzero(~is_outlier)\n idx_outlier = np.flatnonzero(is_outlier)\n y[is_outlier] = NEG_LABEL\n\n if subset not in ['goldstein12', 'kriegel11', 'sugiyama13']:\n raise ValueError(f'invalid subset: {subset}')\n\n if subset == 'goldstein12':\n s = np.union1d(idx_inlier, idx_outlier[:n_outliers])\n\n if subset == 'kriegel11':\n rnd = check_random_state(random_state)\n s = np.union1d(\n idx_inlier,\n rnd.choice(idx_outlier, size=n_outliers, replace=False)\n )\n\n if subset != 'sugiyama13':\n # Downsample outliers\n X = X[s]\n y = y[s]\n\n if return_X_y:\n return X, y\n\n return Bunch(data=X, target=y, feature_names=feature_names)", "def on_pushButton_clicked(self):\n # TODO: not implemented yet\n print(\"加载数据\")\n \n boston = datasets.load_boston()\n train = boston.data\n target = boston.target\n \n self.X_train,self.x_test,self.y_train,self.y_true = train_test_split(train,target,test_size=0.2)", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid", "def load_bottleneck_data(network, dataset):\n\n # training files have been moved to a subdirectory for cleaner root\n training_sets_dir = './training_sets/'\n\n # build the training/validation file names from supplied flags\n training_file = training_sets_dir + network + '_' + dataset + '_100_bottleneck_features_train.p'\n validation_file = training_sets_dir + network + '_' + dataset + '_bottleneck_features_validation.p'\n print(\"Training file \", training_file)\n print(\"Validation file\", validation_file)\n\n with open(training_file, 'rb') as f:\n train_data = pickle.load(f)\n with open(validation_file, 'rb') as f:\n validation_data = pickle.load(f)\n\n X_train = train_data['features']\n y_train = train_data['labels']\n X_valid = validation_data['features']\n y_valid = validation_data['labels']\n\n return X_train, y_train, X_valid, y_valid", "def load_or_generate_data(self) -> None:\n\n # Training set defined as a 5 x 5 square:\n xg1 = np.linspace(-5, 10, 5)\n xg2 = np.linspace(0, 15, 5)\n x = np.zeros((xg1.size * xg2.size, 2))\n for i, x1 in enumerate(xg1):\n for j, x2 in enumerate(xg2):\n x[i + xg1.size * j, :] = [x1, x2]\n\n y = self.branin(x)[:, None]\n self.x, self.y = x, y", "def readMNISTData():\n mnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True) \n return mnist", "def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label", "def load_dataset_train():\n df_train = load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load_binary_imbalanced(classes=(1,7), ratio=0.1):\r\n train_set, train_set_target = load_data()\r\n \r\n # binarize\r\n mask_train_set_imb = np.logical_or(train_set_target == classes[0],train_set_target == 
classes[1])\r\n (data_set_imb,data_set_imb_target)= (train_set[mask_train_set_imb], train_set_target[mask_train_set_imb])\r\n\r\n # imbalance\r\n data_minority = data_set_imb[data_set_imb_target == classes[1]]\r\n data_minority_target = data_set_imb_target[data_set_imb_target == classes[1]]\r\n data_majority = data_set_imb[data_set_imb_target == classes[0]]\r\n data_majority_target = data_set_imb_target[data_set_imb_target == classes[0]]\r\n original_size = data_minority_target.shape[0]\r\n majority_size = data_majority_target.shape[0]\r\n target_size = int(np.floor(majority_size * ratio))\r\n indices = np.random.choice(original_size, size=target_size)\r\n data_minority = data_minority[indices]\r\n data_minority_target = data_minority_target[indices]\r\n\r\n # merge\r\n train_set = np.concatenate([data_minority, data_majority])\r\n train_set_target = np.concatenate([data_minority_target, data_majority_target])\r\n\r\n #shuffle\r\n train_set, train_set_target = np.hsplit(\r\n np.random.permutation(\r\n np.hstack((train_set, train_set_target.reshape((train_set_target.shape[0], 1))))\r\n ), [-1]\r\n )\r\n train_set_target = np.asarray(train_set_target, dtype='int').reshape((train_set_target.shape[0],))\r\n return (train_set[:],train_set_target[:])", "def get_naive_Bayes_classificator(self):\n try:\n with open(TWEET_BAYES_FILENAME, 'rb') as f:\n self.classifier, self.bayes_accuracy = pickle.load(f)\n print('It was read sucessfully!')\n except IOError:\n self.train_naive_Bayes_classificator()", "def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels", "def load(train_file, test_file):\n print('\\nLoad the raw training and test set data...')\n y_train, tx_train, ids_train = load_csv_data(train_file)\n y_test, tx_test, ids_test = load_csv_data(test_file)\n print('\\n... 
finished.')\n return y_train, tx_train, ids_train, y_test, tx_test, ids_test", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train", "def data(self):\n (x_train, y_train), (_, _) = datasets.fashion_mnist.load_data()\n x_train = x_train.reshape((-1, 28, 28, 1))\n x_train, y_train = x_train.astype('float16') / 255.0, \\\n tf.keras.utils.to_categorical(y_train.astype('float16'), 10)\n (x_train, x_eval) = x_train[5000:], x_train[:5000]\n (y_train, y_eval) = y_train[5000:], y_train[:5000]\n train_data, eval_data = (x_train, y_train), (x_eval, y_eval)\n return train_data, eval_data", "def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), axis=1)\n\n scaler = StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 28),\n testy)", "def get_train_data(self) -> Tuple[np.array, np.array, np.array]:\n train_data = []\n for season in self.__train_seasons:\n train_data.extend(self.__get_season_data(season, sys.maxsize, True))\n train_input = np.array([ExamDropEncoder.extract_features(sample, sys.maxsize) for sample in train_data])\n train_output = np.array([1.0 if get_is_mol(sample.selected_player) else 0.0 for sample in train_data])\n\n num_bins = self.get_num_bins(train_input, self.__max_splits)\n self.__discretizer = KBinsDiscretizer(n_bins = num_bins, encode = \"onehot-dense\",\n strategy = ExamDropExtractor.BIN_STRATEGY)\n train_input = self.__discretizer.fit_transform(train_input)\n train_input = self.__add_answered_on_feature(train_data, train_input)\n self.__anova_f_filter = SelectFpr(f_classif, alpha = self.__anova_f_significance)\n train_input = self.__anova_f_filter.fit_transform(train_input, train_output)\n self.__pca = PCA(n_components = self.__pca_explain)\n train_input = self.__pca.fit_transform(train_input)\n return train_input, train_output, 
self.__get_train_weights(train_data)", "def _load_data(self):\n pickle_in = open(\"X_train.pickle\", \"rb\")\n self.X = pickle.load(pickle_in)\n pickle_in = open(\"y_train.pickle\", \"rb\")\n self.Y = pickle.load(pickle_in)\n\n pickle_in = open(\"X_test.pickle\", \"rb\")\n self.X_final = pickle.load(pickle_in)\n pickle_in = open(\"y_test.pickle\", \"rb\")\n self.Y_final = pickle.load(pickle_in)\n\n # Set input shape:\n if K.image_data_format() == 'channels_first':\n self.input_shape = (3, self.img_rows, self.img_cols)\n else:\n self.input_shape = (self.img_rows, self.img_cols, 3)\n\n self.X = self.X.astype('float32')\n self.X /= 255\n self.X_final = self.X_final.astype('float32')\n self.X_final /= 255\n print('X shape:', self.X.shape)\n print(self.X.shape[0], 'Samples')\n\n num_datapoints = 3000\n self.X = self.X[0:num_datapoints]\n self.Y = self.Y[0:num_datapoints]\n\n num_datapoints = 2000\n self.X_final = self.X_final[0:num_datapoints]\n self.Y_final = self.Y_final[0:num_datapoints]\n\n self.Y_final = to_categorical(self.Y_final, self.num_classes)\n\n # Initialize Data\n kfold = StratifiedKFold(n_splits=self.nFolds, shuffle=True)\n\n if self.b_eval_advanced:\n # Loop through the indices the split() method returns\n for index, (train_indices, test_indices) in enumerate(kfold.split(self.X, self.Y)):\n if index == 0:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n # Generate batches from indices\n xtrain, xtest = self.X[train_indices], self.X[test_indices]\n ytrain, ytest = self.Y[train_indices], self.Y[test_indices]\n\n self.data.append(tuple([xtrain, xtest, ytrain, ytest]))\n\n if not self.b_eval_advanced:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n #print(np.asarray(self.data).shape)\n #print(self.data)\n print(\"Y_final Shape\", self.Y_final.shape)", "def load_wilt(return_X_y=False):\n\n filename_train = os.path.join(MODULE_PATH, 'data', 'wilt_train.csv.gz')\n data_train = np.loadtxt(\n filename_train, delimiter=',', dtype=object, skiprows=1\n )\n X_train = data_train[:, 1:]\n y_train = data_train[:, 0]\n\n filename_test = os.path.join(MODULE_PATH, 'data', 'wilt_test.csv.gz')\n data_test = np.loadtxt(\n filename_test, delimiter=',', dtype=object, skiprows=1\n )\n X_test = data_test[:, 1:]\n y_test = data_test[:, 0]\n\n X = np.concatenate([X_train, X_test])\n y = np.concatenate([y_train, y_test])\n\n is_outlier = y == 'w'\n y[~is_outlier] = POS_LABEL\n y[is_outlier] = NEG_LABEL\n\n X = X.astype(float)\n y = y.astype(int)\n feature_names = np.array([\n 'GLCM_pan', 'Mean_Green', 'Mean_Red', 'Mean_NIR',\n 'SD_pan'\n ])\n\n if return_X_y:\n return X, y\n\n return Bunch(data=X, target=y, feature_names=feature_names)", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def bwght(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'bwght.csv'\n if not os.path.exists(os.path.join(path, 
filename)):\n url = 'http://dustintran.com/data/r/wooldridge/bwght.csv'\n maybe_download_and_extract(path, url,\n save_file_name='bwght.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata", "def load_data():\n global X, Y, X_final, Y_final, input_shape\n\n pickle_in = open(\"X_train.pickle\", \"rb\")\n X = pickle.load(pickle_in)\n pickle_in = open(\"y_train.pickle\", \"rb\")\n Y = pickle.load(pickle_in)\n\n pickle_in = open(\"X_test.pickle\", \"rb\")\n X_final = pickle.load(pickle_in)\n pickle_in = open(\"y_test.pickle\", \"rb\")\n Y_final = pickle.load(pickle_in)\n\n if K.image_data_format() == 'channels_first':\n input_shape = (3, img_rows, img_cols)\n else:\n input_shape = (img_rows, img_cols, 3)\n\n X = X.astype('float32')\n X /= 255\n X_final = X_final.astype('float32')\n X_final /= 255\n print('X shape:', X.shape)\n print(X.shape[0], 'Samples')\n\n Y_final = to_categorical(Y_final, num_classes)\n\n if not b_eval_advanced:\n Y = to_categorical(Y, num_classes)\n\n print(\"Y_final Shape\",Y_final.shape)", "def main():\n\n trainData = os.getcwd() + '/data/traindata.txt'\n trainLabels = os.getcwd() + '/data/trainlabels.txt'\n\n #testData = os.getcwd() + '/data/traindata.txt'\n #testLabels = os.getcwd() + '/data/trainlabels.txt'\n\n testData = os.getcwd() + '/data/testdata.txt'\n testLabels = os.getcwd() + '/data/testlabels.txt'\n\n #trainData = os.getcwd() + '/data/toyData.txt'\n #trainLabels = os.getcwd() + '/data/toyLabel.txt'\n #testData = os.getcwd() +'/data/toyTestData.txt'\n #testLabels = os.getcwd() + '/data/toyTestLabel.txt'\n\n #print(trainData, trainLabels)\n myClassifier = NBClassifier.new(NBClassifier.MODE_BERNOULI)\n myClassifier.setTrainData(trainData, trainLabels)\n #print(myClassifier)\n\n #singleTestData = ['Chinese', 'Chinese', 'Chinese', 'Tokyo', 'Japan']\n #prediction = myClassifier.predict(singleTestData)\n #print(f'{singleTestData} >>> {prediction}')\n predictions = myClassifier.predictSet(testData)\n accuracy = myClassifier.reportAccuracy(testLabels)\n\n #print(predictions)\n print(accuracy)", "def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)", "def get_dataset(self):\n\n trainset = datasets.CIFAR10('datasets/CIFAR10/train/', train=True, transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.CIFAR10('datasets/CIFAR10/test/', train=False, transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def fetch_boston_housing_df(preprocess=False):\n (train_X, train_y), (test_X, test_y) = lale.datasets.boston_housing_df(\n test_size=0.33\n )\n orig_X = pd.concat([train_X, test_X]).sort_index()\n orig_y = pd.concat([train_y, test_y]).sort_index()\n black_median = np.median(train_X[\"B\"])\n label_median = np.median(train_y)\n\n if preprocess:\n # 1000(Bk - 0.63)^2 where Bk is the proportion of Blacks by town\n B = pd.Series(orig_X[\"B\"] > black_median, dtype=np.float64)\n encoded_X = orig_X.assign(B=B)\n fairness_info = {\n \"favorable_labels\": [[-10000.0, label_median]],\n \"protected_attributes\": 
[\n {\"feature\": \"B\", \"reference_group\": [0]},\n ],\n }\n return encoded_X, orig_y, fairness_info\n else:\n fairness_info = {\n \"favorable_labels\": [[-10000.0, label_median]],\n \"protected_attributes\": [\n # 1000(Bk - 0.63)^2 where Bk is the proportion of Blacks by town\n {\"feature\": \"B\", \"reference_group\": [[0.0, black_median]]},\n ],\n }\n return orig_X, orig_y, fairness_info", "def load_data(self):\n\n\t\tboard_data = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'board_data.dat'))\n\t\tcows_data = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'cows_data.dat'))\n\t\tlabels = np.load(os.path.join(\n\t\t\t'reinforcement_learning_data_final',\n\t\t\t'labels.dat'))\n\t\tlabels = labels.reshape((len(labels), 1))\n\n\t\tpermutation = np.random.permutation(len(labels))\n\n\t\treturn (board_data[permutation],\n\t\t cows_data[permutation],\n\t\t labels[permutation])", "def load_data():\r\n\r\n mnist_file = gzip.open('../data/mnist.pkl.gz', 'rb')\r\n ## opening the gz archive file by using gzip's open function\r\n\r\n training_data, validation_data, test_data = cPickle.load(mnist_file, encoding='latin1')\r\n ## loading the training, validation and test data by using cPickle's load function\r\n ## passing encoding parameter as ``latin1``\r\n\r\n mnist_file.close()\r\n ## closing the mnist_file\r\n\r\n return (training_data, validation_data, test_data)", "def GetDataset():\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n classes1 = set()\n classes2 = set()\n for f in GetInputFiles():\n class1, class2, fold, fname = f.split('\\\\')[-4:]\n classes1.add(class1)\n classes2.add(class2)\n class1 = class1.split('_')[0]\n class2 = class2.split('_')[0]\n\n x = ReadAndTokenize(f)\n y = [int(class1 == 'positive'), int(class2 == 'truthful')]\n if fold == 'fold4':\n x_test.append(x)\n y_test.append(y)\n else:\n x_train.append(x)\n y_train.append(y)\n\n ### Make numpy arrays.\n x_test = MakeDesignMatrix(x_test)\n x_train = MakeDesignMatrix(x_train)\n y_test = numpy.array(y_test, dtype='float32')\n y_train = numpy.array(y_train, dtype='float32')\n\n dataset = (x_train, y_train, x_test, y_test)\n with open('dataset.pkl', 'wb') as fout:\n pickle.dump(dataset, fout)\n return dataset", "def _load_data(self):\n\n from sklearn.datasets import fetch_openml\n mnist = fetch_openml('mnist_784', cache=True)\n # data_x = np.array(final_data_df)\n feat_data = np.array(mnist.data).astype('float32')\n target_data = mnist.target.astype('int64')\n shuffling_index = np.arange(feat_data.shape[0])\n np.random.shuffle(shuffling_index)\n feat_data = feat_data[shuffling_index]\n target_data = target_data[shuffling_index]\n\n cur_data_list = []\n cur_target_list = []\n for i in range(10):\n cur_mask = target_data == i\n cur_data_list.append(feat_data[cur_mask][:500])\n cur_target_list.append(target_data[cur_mask][:500])\n feat_data = np.concatenate(cur_data_list)\n target_data = np.concatenate(cur_target_list)\n\n self.data_x = feat_data\n self.data_y = self.to_one_hot_encoding(target_data)\n self.numerical_idx = np.arange(784)\n self.non_num_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = self.data_x.astype('float32')\n\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def read_data():\n csv_data = pd.read_csv('./dataset.csv')\n x = csv_data[['X1', 'X2']]\n x = x.values # numpy array for x: (180, 2)\n y = csv_data['Label']\n y = y.values # numpy array for y: (180, )\n\n\t# shuffle the data\n total = x.shape[0]\n mask = 
list(range(total))\n np.random.shuffle(mask)\n x = x[mask]\n y = y[mask]\n\t\n\t# 80 percent for train and 20 percent for test\n train_split = int(0.8 * total)\n x_train, y_train = x[:train_split], y[:train_split]\n x_test, y_test = x[train_split:], y[train_split:]\n return x_train, y_train, x_test, y_test", "def get_training_data(self):\n\n # this actually never was a set\n # src_set = self.target['src'].values\n # dst_set = self.target['dst'].values\n\n # train_negative = self.get_negative_edges(src_set, dst_set, self.train_ind.shape[0]) # * self.K)\n # test_negative = self.get_negative_edges(src_set, dst_set, self.test_ind.shape[0])\n\n train_positive = self.target.iloc[self.train_edge_ind].values\n test_positive = self.target.iloc[self.test_edge_ind].values\n\n # # print(train_positive.shape, train_negative.shape, test_positive.shape, test_negative.shape)\n # print(f\"Working with {train_positive.shape[0]} positive and {train_negative.shape[0]} negative samples in the train set, {test_positive.shape[0]} and {test_negative.shape[0]} - in test set\")\n\n X_train = train_positive\n X_test = test_positive\n\n y_train = np.ones((self.train_edge_ind.shape[0],))\n y_test = np.ones((self.test_edge_ind.shape[0],))\n\n # X_train = np.vstack([\n # train_positive,\n # train_negative\n # ])\n\n # X_test = np.vstack([\n # test_positive,\n # test_negative\n # ])\n\n # y_train = np.concatenate([np.ones((self.train_ind.shape[0],)), np.zeros((self.train_ind.shape[0]),)]) # self.train_ind.shape[0]) * self.K\n # y_test = np.concatenate([np.ones((self.test_ind.shape[0],)), np.zeros((self.test_ind.shape[0],))])\n\n assert X_train.shape[0] == y_train.shape[0]\n assert X_test.shape[0] == y_test.shape[0]\n\n def shuffle(X, y):\n ind_shuffle = np.arange(0, X.shape[0])\n np.random.shuffle(ind_shuffle)\n return X[ind_shuffle], y[ind_shuffle]\n\n self.X_train, self.y_train = shuffle(X_train, y_train)\n self.X_test, self.y_test = shuffle(X_test, y_test)\n\n print(f\"Splitting into {self.X_train.shape[0]} train and {self.X_test.shape[0]} test samples\")\n\n # return X_train, X_test, y_train, y_test", "def learn(self, Xtrain, ytrain):", "def get_dataset(self):\n\n trainset = datasets.CIFAR100('datasets/CIFAR100/train/', train=True, transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.CIFAR100('datasets/CIFAR100/test/', train=False, transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def get_toy_classification_data(\n target=\"target\", n_samples=300, n_classes=2, shuffle=True, random_state=32, **kwargs\n):\n x, y = make_classification(\n n_samples=n_samples,\n n_classes=n_classes,\n shuffle=shuffle,\n random_state=random_state,\n **kwargs\n )\n train_df = pd.DataFrame(data=x, columns=range(x.shape[1]))\n train_df[target] = y\n return train_df", "def _load_data(self):\n data_x, data_y = make_classification(n_samples=5000, n_features=20,\n n_informative=10,\n n_redundant=0, n_repeated=0,\n n_classes=2,\n n_clusters_per_class=4,\n weights=None, flip_y=0.01,\n class_sep=1.0, hypercube=True,\n shift=0.0, scale=1.0,\n shuffle=True,\n random_state=self.args.rand_seed)\n\n self.orig_column_names = np.arange(data_x.shape[-1])\n self.data_x = data_x\n self.data_y = self.to_one_hot_encoding(data_y)\n self.numerical_idx = np.arange(data_x.shape[-1])\n self.non_num_idx = None\n self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = data_x[:, :1].astype('float32')\n if self.args.graph_type:\n self.adj = 
self.get_adjacency()", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def load_data(flatten=True):\n if flatten:\n reshape = _flatten\n else:\n reshape = _square\n\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = reshape(x_train)\n x_test = reshape(x_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n y_train = to_categorical(y_train, 10)\n y_test = to_categorical(y_test, 10)\n return x_train, y_train, x_test, y_test", "def download_uci_bikeshare(data_dir: pathlib.Path) \\\n -> Tuple[pd.DataFrame, pd.DataFrame]:\n\n # Downlaod the dataset (with caching to disk)\n train_pkl = data_dir / 'train_df.pkl'\n test_pkl = data_dir / 'test_df.pkl'\n\n if data_dir.exists() and len(tuple(data_dir.iterdir())) > 1:\n train_df = pd.read_pickle(train_pkl)\n test_df = pd.read_pickle(test_pkl)\n else:\n # import and save the dataset to a gitignored directory\n data_dir.mkdir(exist_ok=True)\n with (data_dir / '.gitignore').open('w') as gitignore:\n gitignore.write('*.pkl\\n')\n\n # download the dataset from UCI\n zip_url = 'https://archive.ics.uci.edu/ml/' \\\n 'machine-learning-databases/00275/Bike-Sharing-Dataset.zip'\n z = zipfile.ZipFile(io.BytesIO(requests.get(zip_url).content))\n dtypes = {\n 'holiday': 'bool',\n 'workingday': 'bool',\n 'weathersit': 'category',\n 'season': 'category'\n }\n with z.open('hour.csv') as csv:\n full_df = pd.read_csv(csv, dtype=dtypes)\n\n # split train/test by year\n is_2011 = full_df['yr'] == 0\n train_df = full_df[is_2011].reset_index(drop=True)\n test_df = full_df[~is_2011].reset_index(drop=True)\n\n # serialize datasets to disk\n train_df.to_pickle(train_pkl)\n test_df.to_pickle(test_pkl)\n\n return train_df, test_df", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n path = os.getcwd() # reads the current path\n x_train = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_train = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n x_test = np.load(path + '/files/tinyX_test.npy', 'r') # reads the input file\n x_train, y_train = shuffle(x_train, y_train)\n\n return x_train, y_train, x_test", "def get_data(dataset):\n \n if dataset not in possible_datasets:\n datasets_output = ', '.join(possible_datasets)\n raise ValueError('dataset must be one of: {}'.format(datasets_output))\n \n if dataset == 'mnist':\n (clean_train, __), (clean_test, __) = mnist.load_data()\n elif dataset == 'fashion-mnist':\n (clean_train, __), (clean_test, __) = fashion_mnist.load_data()\n\n clean_train = clean_train.astype('float32') / 255.\n clean_train = clean_train.reshape(data_shape)\n clean_test = clean_test.astype('float32') / 255.\n clean_test = clean_test.reshape(data_shape)\n\n noisy_test = add_gaussian_noise_np(clean_test, 0.0, 0.4)\n return clean_train, clean_test, noisy_test", "def palm_beach(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'palm_beach.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/Stat2Data/PalmBeach.csv'\n maybe_download_and_extract(path, url,\n save_file_name='palm_beach.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata", "def get_dataset_for_classification() -> Tuple[np.ndarray, np.ndarray]:\n dataset = np.array(\n [[1, 2, 0],\n [2, 3, 0],\n [3, 4, 0],\n [4, 5, 0],\n [5, 6, 0],\n [6, 1, 1],\n [7, 2, 1],\n [8, 3, 1],\n [9, 4, 1],\n [1, 5, 0],\n [2, 6, 0],\n [3, 7, 0],\n [4, 8, 0],\n [5, 9, 0],\n [6, 2, 1]],\n dtype=float\n )\n X = dataset[:, :-1]\n y = dataset[:, -1]\n return X, y", "def load_projected_binary_dataset(saved_dataset):\n num_train = len(saved_dataset.train_labels)\n assert len(saved_dataset.train_labels) == len(saved_dataset.train_features)\n num_valid = len(saved_dataset.valid_labels)\n assert len(saved_dataset.valid_labels) == len(saved_dataset.valid_features)\n num_test = len(saved_dataset.test_labels)\n assert len(saved_dataset.test_labels) == len(saved_dataset.test_features)\n if num_train == 0 or num_valid == 0:\n raise ValueError('Number of train/valid examples'\n ' must be more than zero.')\n feature_size = len(saved_dataset.train_features[0].features)\n\n train_data = np.zeros((num_train, feature_size))\n train_labels = np.zeros(num_train)\n for i in range(num_train):\n train_labels[i] = saved_dataset.train_labels[i]\n for j in range(feature_size):\n train_data[i][j] = saved_dataset.train_features[i].features[j]\n\n valid_data = np.zeros((num_valid, feature_size))\n valid_labels = np.zeros(num_valid)\n for i in range(num_valid):\n valid_labels[i] = saved_dataset.valid_labels[i]\n for j in range(feature_size):\n valid_data[i][j] = saved_dataset.valid_features[i].features[j]\n\n if num_test > 0:\n test_data = np.zeros((num_test, feature_size))\n test_labels = np.zeros(num_test)\n for i in range(num_test):\n test_labels[i] = saved_dataset.test_labels[i]\n for j in range(feature_size):\n test_data[i][j] = saved_dataset.test_features[i].features[j]\n else:\n test_data = None\n test_labels = None\n\n return (train_data, train_labels, valid_data, valid_labels,\n test_data, test_labels)", "def load_data():\n\n \"\"\"The ``training_data`` is 
returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\"\"\"\n\n \"\"\"The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\"\"\"\n\n \"\"\"The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\"\"\"\n f = gzip.open('MNIST/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = Pickle.load(f, encoding='bytes'\n )\n f.close()\n return (training_data, validation_data, test_data)", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def main():\n\n logger.info('Process initiated - Building dataset')\n\n if os.path.isfile(train_path) and os.path.isfile(test_path):\n logger.info('Loading pickled data')\n return pd.read_pickle(train_path), pd.read_pickle(test_path)\n\n logger.info('Reading COSMIC Cancer Gene Census')\n gene_census = cancer_gene_census()\n gene_census.extend(civic_cancer_genes())\n\n gene_census = set(gene_census)\n\n training_data = pd.DataFrame()\n testing_data = pd.DataFrame()\n\n for cancer_type in cancer_types:\n data_file_name = cancer_type + \".meth.by_mean.data.txt\"\n data_file_location = os.path.join(data_location, data_file_name)\n\n logger.info('Reading Methylation data for {}'.format(cancer_type))\n\n methyl_data = pd.read_csv(data_file_location, delimiter='\\t', skiprows=[1], index_col=0)\n\n logger.info(\n 'Number of Genes: {0} | Number of Patients: {1}'.format(methyl_data.shape[0], methyl_data.shape[1]))\n logger.info('Preprocessing Methylation data')\n\n methyl_data = genes_feature_selection(methyl_data, gene_census)\n\n logger.info('Number of Genes after processing: {0}\\n'.format(methyl_data.shape[0]))\n\n methyl_data = add_classification_label(methyl_data)\n methyl_data = methyl_data.transpose()\n\n normal_cases = methyl_data[methyl_data['Tumor'] == 0]\n logger.info(normal_cases.shape)\n train_normal_cases = normal_cases.sample(frac=0.7, random_state=200)\n logger.info(train_normal_cases.shape)\n test_normal_cases = normal_cases.drop(train_normal_cases.index)\n logger.info(train_normal_cases.shape)\n\n tumor_cases = methyl_data[methyl_data['Tumor'] != 0]\n logger.info(tumor_cases.shape)\n train_tumor_cases = tumor_cases.sample(frac=0.7, random_state=200)\n logger.info(train_tumor_cases.shape)\n\n test_tumor_cases = tumor_cases.drop(train_tumor_cases.index)\n logger.info(test_tumor_cases.shape)\n\n training_data = training_data.append(train_normal_cases)\n training_data = training_data.append(train_tumor_cases)\n\n testing_data = testing_data.append(test_normal_cases)\n testing_data = testing_data.append(test_tumor_cases)\n\n training_data = training_data.sample(frac=1)\n testing_data = testing_data.sample(frac=1)\n\n logger.info('Pickling training and testing data')\n training_data.to_pickle(train_path)\n testing_data.to_pickle(test_path)\n\n logger.info('Processing completed!')\n visualize_data(training_data)\n\n return training_data, testing_data", "def get_dataset(self):\n trainset = datasets.KMNIST('datasets/KMNIST/train/', train=True, transform=self.train_transforms,\n 
target_transform=None, download=True)\n valset = datasets.KMNIST('datasets/KMNIST/test/', train=False, transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def load_as_one_hot(self):\n\n labels = [] \n examples = [] \n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n # load examples and labels\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_feat_list = read_cuis(file_path)\n examples.append(' '.join(file_feat_list))\n \n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n\n examples = self.token2int.texts_to_matrix(examples, mode='binary')\n\n return examples, labels", "def load_data(train_path, test_path):\n\n train_data = pd.read_csv(train_path)\n test_data = pd.read_csv(test_path)\n\n print(\"number of training examples = \" + str(train_data.shape[0]))\n print(\"number of test examples = \" + str(test_data.shape[0]))\n print(\"train shape: \" + str(train_data.shape))\n print(\"test shape: \" + str(test_data.shape))\n\n return train_data, test_data", "def load_data(train_path, test_path):\n\n train_data = pd.read_csv(train_path)\n test_data = pd.read_csv(test_path)\n\n print(\"number of training examples = \" + str(train_data.shape[0]))\n print(\"number of test examples = \" + str(test_data.shape[0]))\n print(\"train shape: \" + str(train_data.shape))\n print(\"test shape: \" + str(test_data.shape))\n\n return train_data, test_data", "def _get_training_data(self) -> tuple:\n\n training_data = self._data.loc[self._data.target == 'train'].drop('target', axis=1)\n y = training_data.y_label.to_numpy()\n X = training_data.drop('y_label', axis=1).to_numpy()\n\n return X, y", "def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test", "def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. 
SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples", "def train_naive_Bayes_classificator(self):\n positive_tweet_tokens = twitter_samples.tokenized(\n 'positive_tweets.json')\n negative_tweet_tokens = twitter_samples.tokenized(\n 'negative_tweets.json')\n\n positive_cleaned_tokens_list = []\n negative_cleaned_tokens_list = []\n\n for tokens in positive_tweet_tokens:\n positive_cleaned_tokens_list.append(\n self.clean_tokens_and_lemmetize(tokens))\n for tokens in negative_tweet_tokens:\n negative_cleaned_tokens_list.append(\n self.clean_tokens_and_lemmetize(tokens))\n\n negative_dataset = [(token, \"negative\") for token in self.format_tweets_for_model(\n negative_cleaned_tokens_list)]\n positive_dataset = [(token, \"positive\") for token in self.format_tweets_for_model(\n positive_cleaned_tokens_list)]\n\n dataset = positive_dataset + negative_dataset\n\n shuffle(dataset)\n\n self.train_data = dataset[:8000]\n self.test_data = dataset[8000:]\n\n self.classifier = NaiveBayesClassifier.train(self.train_data)\n self.bayes_accuracy = classify.accuracy(\n self.classifier, self.test_data)\n with open(TWEET_BAYES_FILENAME, 'wb') as f:\n pickle.dump(\n (self.classifier, self.bayes_accuracy),\n f,\n protocol=pickle.HIGHEST_PROTOCOL)", "def load_data(opt=\"mnist\"):\n if opt == \"mnist\":\n train, test = tf.keras.datasets.mnist.load_data()\n \n x_train, y_train = train\n x_test, y_test = test\n \n x_train = x_train.reshape(x_train.shape[0], 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n \n y_train = y_train.astype(np.int)\n y_test = y_test.astype(np.int)\n for i in range(len(y_train)):\n y_train[i] = 1 if y_train[i] % 2 == 0 else -1\n for i in range(len(y_test)):\n y_test[i] = 1 if y_test[i] % 2 == 0 else -1\n\n elif opt == \"covertype\":\n df = pd.read_csv(\"covtype.data\", header=None)\n x = df.iloc[:, 0:54].values\n y = df[54].values\n for i in range(len(y)):\n y[i] = 1 if y[i] % 2 == 0 else -1\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n \n else:\n logging.error(\"Unknown dataset!!\")\n\n logging.info(\"train data shape: {}\".format(x_train.shape))\n logging.info(\"test data shape: {}\".format(x_test.shape))\n return (x_train, y_train), (x_test, y_test)", "def load_data():\n # Load image data from MNIST.\n (train_x, train_y),(eval_x, eval_y) = keras.datasets.mnist.load_data()\n\n # We convert the input data to (60000, 28, 28, 1), float32 and normalize our data values to the range [0, 1].\n train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], train_x.shape[2], 1)\n eval_x = eval_x.reshape(eval_x.shape[0], eval_x.shape[1], eval_x.shape[2], 1)\n\n train_x = train_x.astype('float32')\n eval_x = eval_x.astype('float32')\n train_x /= 255\n eval_x /= 255\n\n # Preprocess class labels \n train_y = train_y.astype(np.int32)\n eval_y = eval_y.astype(np.int32)\n\n train_y = 
np_utils.to_categorical(train_y, 10)\n eval_y = np_utils.to_categorical(eval_y, 10)\n\n return train_x, train_y, eval_x, eval_y", "def load_data():\r\n f = gzip.open('mnist.pkl.gz', 'rb')\r\n training_data, validation_data, test_data = pickle.load(f,encoding='bytes')\r\n f.close()\r\n return (training_data, validation_data, test_data)", "def load_bbbc001(split='index',\n reload=True,\n data_dir=None,\n save_dir=None,\n **kwargs):\n # Featurize BBBC001 dataset\n bbbc001_tasks = [\"cell-count\"]\n\n if data_dir is None:\n data_dir = DEFAULT_DIR\n if save_dir is None:\n save_dir = DEFAULT_DIR\n\n if reload:\n save_folder = os.path.join(save_dir, \"bbbc001-featurized\", str(split))\n loaded, all_dataset, transformers = deepchem.utils.save.load_dataset_from_disk(\n save_folder)\n if loaded:\n return bbbc001_tasks, all_dataset, transformers\n dataset_file = os.path.join(data_dir, \"BBBC001_v1_images_tif.zip\")\n labels_file = os.path.join(data_dir, \"BBBC001_v1_counts.txt\")\n\n if not os.path.exists(dataset_file):\n deepchem.utils.download_url(url=BBBC1_IMAGE_URL, dest_dir=data_dir)\n if not os.path.exists(labels_file):\n deepchem.utils.download_url(url=BBBC1_LABEL_URL, dest_dir=data_dir)\n # Featurize Images into NumpyArrays\n loader = deepchem.data.ImageLoader()\n dataset = loader.featurize(dataset_file, in_memory=False)\n\n # Load text file with labels\n with open(labels_file) as f:\n content = f.readlines()\n # Strip the first line which holds field labels\n lines = [x.strip() for x in content][1:]\n # Format is: Image_name count1 count2\n lines = [x.split(\"\\t\") for x in lines]\n counts = [(float(x[1]) + float(x[2])) / 2.0 for x in lines]\n y = np.array(counts)\n\n # This is kludgy way to add y to dataset. Can be done better?\n dataset = deepchem.data.DiskDataset.from_numpy(dataset.X, y)\n\n if split == None:\n transformers = []\n logger.info(\"Split is None, no transformers used for the dataset.\")\n return bbbc001_tasks, (dataset, None, None), transformers\n\n splitters = {\n 'index': deepchem.splits.IndexSplitter(),\n 'random': deepchem.splits.RandomSplitter(),\n }\n if split not in splitters:\n raise ValueError(\"Only index and random splits supported.\")\n splitter = splitters[split]\n\n logger.info(\"About to split dataset with {} splitter.\".format(split))\n frac_train = kwargs.get(\"frac_train\", 0.8)\n frac_valid = kwargs.get('frac_valid', 0.1)\n frac_test = kwargs.get('frac_test', 0.1)\n\n train, valid, test = splitter.train_valid_test_split(\n dataset,\n frac_train=frac_train,\n frac_valid=frac_valid,\n frac_test=frac_test)\n transformers = []\n all_dataset = (train, valid, test)\n if reload:\n deepchem.utils.save.save_dataset_to_disk(save_folder, train, valid, test,\n transformers)\n return bbbc001_tasks, all_dataset, transformers", "def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y", "def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(sys.argv[1]) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tn_samples = int(temp[0])\n\t\tn_features = int(temp[1])\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tif len(value)<28:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tdata[count] = np.asarray(value[:28], dtype=np.float)\n\t\t\t\ttarget[count] = 
np.asarray(value[28], dtype=np.int)\t\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t#print \"data is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\n\twith open(join(module_path, 'descr', 'crawl.rst')) as rst_file:\n\t\tfdescr = rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=fdescr,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])", "def boston_housing(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'housing.data'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/' \\\n 'housing/housing.data'\n maybe_download_and_extract(path, url)\n\n x_train = pd.read_csv(os.path.join(path, filename),\n header=None, delimiter=r\"\\s+\").as_matrix()\n columns = ['CRIM',\n 'ZN',\n 'INDUS',\n 'CHAS',\n 'NOX',\n 'RM',\n 'AGE',\n 'DIS',\n 'RAD',\n 'TAX',\n 'PTRATIO',\n 'B',\n 'LSTAT',\n 'MEDV']\n metadata = {'columns': columns}\n return x_train, metadata", "def cma_bst(redownload: bool = False) -> Dataset:\n return Dataset.get(\"cma_bst\", redownload=redownload)", "def loadData(self):\n # Load the raw CIFAR-10 data\n num_training = 49000\n num_validation = 1000\n num_test = 1000\n subtract_mean = True\n\n cifar10_dir = '/home/parallels/PycharmProjects/Courses/232A/project2/stats232a/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = list(range(num_training, num_training + num_validation))\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = list(range(num_training))\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = list(range(num_test))\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n if subtract_mean:\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Transpose so that channels come first\n X_train = X_train.transpose(0, 3, 1, 2)\n X_val = X_val.transpose(0, 3, 1, 2)\n X_test = X_test.transpose(0, 3, 1, 2)\n\n # Package data into a dictionary\n self.data = {\n 'X_train': X_train, 'y_train': y_train,\n 'X_val': X_val, 'y_val': y_val,\n 'X_test': X_test, 'y_test': y_test,\n }", "def binary_classification_dataset() -> tf.data.Dataset:\n\n # Create features and labels\n X = tf.random.normal(shape=(100, 3))\n y = tf.random.uniform(minval=0, maxval=2, dtype=tf.int32, shape=(100,)) # Binary labels\n\n return tf.data.Dataset.from_tensor_slices((X, y))", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a 
pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def get_data(patients=None, mris_train=None, batch_size=100, id_train=0, is_one_hot=True):\n \n # Get size of both sets (defined by batch size)\n Npatients_train = (mris_train.shape[0]//batch_size)*batch_size\n\n # Get labels for both sets\n patients_train = patients[patients['train_valid_test'] == id_train][:Npatients_train]\n if is_one_hot:\n train_labels = CNN.convert_to_one_hot(patients_train['diagnosis']-1, len( np.unique(patients_train['diagnosis'])))\n else:\n train_labels = (patients_train['diagnosis']-1).as_matrix()\n\n # Get only wanted data (multiple of batch size) and convert to float\n train_data = mris_train[:Npatients_train, :]\n train_data = train_data.astype('float32')\n\n # Data pre-processing for train and validation\n train_data = CNN.normalize_data(train_data)\n\n # Print final shapes of data and labels\n # print('Train data shape=', train_data.shape)\n # print('Train data labels=', train_labels.shape, ' labels_sum=', np.sum(train_labels, axis=0))\n \n return train_data, train_labels", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n else:\n data = datasets.load_wine()\n\n X = data.data\n y = data.target\n return X, y", "def random_cls_dataset(request):\n set_seed()\n shape = request.param.get('shape', 10)\n size = 
request.param.get('size', 100)\n X, Y = make_classification(n_samples=2*size, n_features=shape, n_classes=10, n_informative=10, n_redundant=0)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.5)\n Y_train, Y_test = Y_train.astype(np.int64), Y_test.astype(np.int64)\n return (X_train, Y_train), (X_test, Y_test)", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def load_training_data(s3: str = \"s3://epam-hack4med-dataset\") -> pd.DataFrame:\n # Load labels\n df_labels = pd.read_csv(f\"{s3}/CRACoV-ETYKIETY.csv\")\n df_labels[id_cols] = df_labels[id_cols].astype(int)\n df_labels = df_labels.set_index(id_cols)\n labels = df_labels[[basic_target]]\n idx = labels.index\n\n # Load hospital admission file (PRZYJECIE)\n df_admission = pd.read_csv(f\"{s3}/CRACoV-PRZYJECIE.csv\")\n binary_adm_vars = [x for x in basic_adm_vars if df_admission[x].isin([\"Tak\", \"Nie\"]).any()]\n other_adm_vars = [x for x in basic_adm_vars if x not in binary_adm_vars]\n adm = df_admission.copy()\n adm = adm[id_cols + binary_adm_vars + other_adm_vars]\n adm = adm.set_index(id_cols).reindex(idx)\n \n # Load biochem analyses\n biochem_raw = pd.read_csv(f\"{s3}/CRACoV-BIOCHEMIA.csv\", parse_dates=['DATA_WYK']).sort_values('DATA_WYK')\n biochem = (\n biochem_raw.loc[biochem_raw.KOD.isin(basic_bio_codes)]\n .pivot_table(index=['LP.', 'ID_LAB'], columns='KOD', values='WYNIK', aggfunc='first')\n .reindex(idx)\n )\n # Merge it all together\n Xy_raw = pd.concat([labels, adm, biochem], axis='columns')\n return Xy_raw", "def load_data():\n f = gzip.open('../data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f)\n f.close()\n return (training_data, validation_data, test_data)", "def get_train(self, data_file):\r\n return self.read_data(data_file)", "def test_train_dataset(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n expected = [\n {'alpha': 0.6931471805599453,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 1.3},\n {'alpha': 0.9729550745276565,\n 'dim': 1,\n 'inequal': 'lt',\n 'threshold': 1.0},\n {'alpha': 0.8958797346140273,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 0.90000000000000002}\n ]\n self.assertEqual(classifiers, expected)", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def load_data():\n f = gzip.open('../data/mnist.pkl.gz', mode='rb')\n\n # NOTE: I get errors when I don't use encoding='latin1' because of Python 2 vs Python 3 compatibility issues\n # training_data, validation_data, test_data = pickle.load(f, encoding='latin1')\n training_data, validation_data, test_data = pickle.load(f)\n\n f.close()\n\n return training_data, validation_data, test_data", "def load_training_data(\n self,\n train_data_file=\"datasets/train_data.json\",\n test_data_file=\"datasets/test_data.json\",\n ):\n train_data = pd.read_json(train_data_file)\n test_data = pd.read_json(test_data_file)\n return train_data, test_data", "def multiclass_toy_data(): \n #dataset = np.zeros((10,5), np.int)\n dataset = np.array([[0,0,0,0,4],\n [0,0,0,0,5],\n [1,3,0,0,0],\n [3,1,0,0,1],\n [0,0,6,2,0],\n [0,0,0,0,0],\n [0,0,1,7,2], \n [0,0,5,1,5],\n [0,0,34,0,0],\n [0,0,3,0,0]])\n Y = np.array([3,3,2,2,1,0,1,1,0,0])\n #for i in range(10):\n 
#for j in range(5):\n #dataset[i][j] = np.random.randint(0,10) \n dataset = np.column_stack((dataset, Y))\n return (dataset)", "def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset = dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n deep_neural_network_model.save('deep_neural_network_model')", "def load_classification_dataset(step, do_lower_case,data_type,data_subtype,use_syntetic_data):\n assert step in ['train', 'test']\n binary = False \n undersample_majority = False\n\n paths = ['~/Github/Data/Patient/NIRADS/PET_CT_NIRADS.xlsx', '~/Github/Data/Patient/NIRADS/MR_NIRADS_2018.xlsx','~/Github/Data/Patient/NIRADS/MR_NIRADS.xlsx']\n if data_type == 'ct':\n data_r = pd.read_excel(paths[0])\n else:\n data_r = pd.read_excel(paths[1])\n data_r.append(pd.read_excel(paths[2]), ignore_index = True, sort=False)\n\n data_p,data_n, y_p, y_n = tc.text_cleaning(data_r, None, data_target='section') \n\n if data_subtype == 'primary':\n data = data_p\n y = y_p -1\n else:\n data = data_n\n y = y_n -1\n\n if binary:\n y[y<2]=0\n y[y>0]=1\n\n y_dist = [np.sum(y==x) for x in np.unique(y)]\n print(\"Distribution of all labels: \", y_dist, \"\\n\\n\")\n\n train_text, test_text, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=1)\n\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels: \", y_dist, \"\\n\\n\")\n\n if step =='train':\n if use_syntetic_data:\n data_syntetic = pd.read_csv('~/Github/Data/Patient/NIRADS/PET_CT_NIRADS_syntetic.csv')\n train_text = np.concatenate((train_text,data_syntetic['syntetic_data'].values))\n y_train = np.concatenate((y_train,data_syntetic['syntetic_label'].values-1))\n\n train_text, test_text, y_train, y_test = train_test_split(train_text, y_train, test_size=0.5, random_state=1)\n train_text = np.concatenate((train_text,test_text))\n y_train = np.concatenate((y_train,y_test))\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels after inserting syntetic data: \", y_dist, \"\\n\\n\")\n\n if not undersample_majority:\n data_to_use = train_text.copy()\n y_to_use = y_train.copy()\n else:\n max_label1 = 1000\n data_to_use = []\n y_to_use = []\n y1=0\n for x in range(len(y_train)):\n if y_train[x] !=1:\n 
data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n else:\n if y1 <max_label1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n y1+=1\n\n else:\n data_to_use = test_text.copy()\n y_to_use = y_test.copy()\n\n basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n examples = []\n\n for i, tokens in tqdm(enumerate(data_to_use)):\n label = y_to_use[i]\n examples.append(\n ClassificationExample(\n id=i,\n tokens_a=basic_tokenizer.tokenize(tokens),\n tokens_b=None,\n label=label,\n )\n )\n logging.info('Number of `%s` examples: %d', step, len(examples))\n \n return examples", "def load_data(include_extra=False):\n\n if not os.path.exists(outfile_train):\n download_svhn(url_train, outfile_train)\n\n if not os.path.exists(outfile_test):\n download_svhn(url_test, outfile_test)\n\n mat = sp.io.loadmat(outfile_train)\n x_train = mat['X']\n y_train = mat['y']\n\n mat = sp.io.loadmat(outfile_test)\n x_test = mat['X']\n y_test = mat['y']\n\n if include_extra:\n if not os.path.isfile(outfile_extra):\n download_svhn_extra()\n mat_e = sp.io.loadmat(outfile_extra)\n x_train = np.concatenate((x_train, mat_e['X']), axis=-1)\n y_train = np.concatenate((y_train, mat_e['y']), axis=0)\n\n x_train = preprocess(x_train)\n x_test = preprocess(x_test)\n y_train[y_train == 10] = 0\n y_test[y_test == 10] = 0\n y_test = np.squeeze(y_test)\n y_train = keras.utils.to_categorical(y_train)\n y_train = y_train.astype('float32')\n\n return x_train, y_train, x_test, y_test, [str(i) for i in range(10)]", "def train_and_test_with_naive_bayes(data, class_names):\n # Train data\n class_normalized_data = normalize_data(data, class_names[0])\n class_training_table = util.get_training_table(class_normalized_data, 0, get_training_index())\n class_model = nb.train(class_training_table[0], class_training_table[1])\n\n # Get Class Test Data\n class_index = 0\n class_test_feature_table = util.get_test_table(class_normalized_data, class_index, get_test_index())[0]\n class_test_classes = util.get_test_table(class_normalized_data, class_index, get_test_index())[1]\n\n original_indices = get_test_index()\n # Go through each line of test data and compare results.\n for index in range(len(class_test_classes)):\n class_features = util.get_test_features(class_test_feature_table, index)\n result_class = nb.get_classification(class_features, class_model) \n expected_class = class_test_classes[index]\n matched = result_class == expected_class\n util.print_test_result(original_indices[index], matched, [result_class], expected_class, class_names)", "def initializing():\n data = np.array(pd.read_csv('data.csv'))[:,1:]\n\n X = data[:,1:-1].astype(int)\n y = data[:,-1].astype(int)\n y_binary = (y == 1).astype(int)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, \n y_binary, \n test_size=0.25, \n )\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n return (X_train, X_test, y_train, y_test, X, y_binary)" ]
[ "0.7141485", "0.69764185", "0.6372257", "0.6351349", "0.6293626", "0.60629076", "0.5941613", "0.59359765", "0.5885802", "0.5858357", "0.58188754", "0.57530105", "0.5714098", "0.5703062", "0.56975883", "0.5686469", "0.5659355", "0.5605132", "0.560234", "0.55977595", "0.5596762", "0.5585393", "0.5571161", "0.5550326", "0.554698", "0.5539647", "0.5532786", "0.55197036", "0.55194855", "0.55177706", "0.55176103", "0.55096364", "0.5504049", "0.5482518", "0.54749095", "0.54660827", "0.5453134", "0.5451722", "0.54485893", "0.5439337", "0.54337543", "0.54213077", "0.54157007", "0.54078215", "0.5397352", "0.5389131", "0.53841895", "0.53824997", "0.53788865", "0.53687173", "0.53559077", "0.53558934", "0.5343644", "0.53363556", "0.53324056", "0.53309387", "0.53238624", "0.5322705", "0.53224665", "0.5320941", "0.53129077", "0.5307104", "0.530656", "0.5305527", "0.52981305", "0.52981305", "0.5292418", "0.5290899", "0.52900213", "0.52888775", "0.5282661", "0.5280146", "0.52698904", "0.5263978", "0.52592725", "0.52430725", "0.5238419", "0.52343094", "0.5230492", "0.52292484", "0.5227085", "0.5225382", "0.522441", "0.5223933", "0.52169997", "0.52140796", "0.52118164", "0.52112186", "0.5204036", "0.51988894", "0.5196036", "0.51918584", "0.5184999", "0.51841927", "0.5183128", "0.51695937", "0.51624364", "0.5161117", "0.5156411", "0.5151377" ]
0.76880604
0
add(Vector,Vector) adds two vectors
def add(first, other):
    if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):
        return FreeCAD.Vector(first.x+other.x, first.y+other.y, first.z+other.z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vector_add(v1, v2):\n return v1[0] + v2[0], v1[1] + v2[1]", "def vector_add(a, b):\n assert(len(a) == len(b))\n\n from operator import add\n return tuple(map(add, a, b))", "def vectorAdd(a, b):\n return [a[i] + b[i] for i, j in enumerate(a)]", "def __add__(self,other):\n return Vector(self.x + other.x, self.y+other.y)\n pass", "def __add__(self,other):\n return Vector(self.x+other.x,self.y+other.y,self.z+other.z)", "def __add__(self, other):\n return Vector(self.x + other.x, self.y + other.y)", "def vector_sum(a, b):\n return a[0] + b[0], a[1] + b[1]", "def __add__(self, other):\n return Vec2d(self.v[0] + other[0], self.v[1] + other[1])", "def __iadd__(self,other):\n return Vector(self.x + other.x, self.y + other.y)\n pass", "def __add__(self, other):\n return Vector([c1 + c2 for (c1, c2) in zip(self.components, other.components)])", "def __add__(self, vector):\n return self.translated(vector)", "def _addVectors(X1,X2):\n _checkSize(X1,X2)\n return [ X1[i] + X2[i] for i in range(len(X1))]", "def sum_vectors(vector_1, vector_2):\n new_coordinates = []\n index = 0\n while index < vector_1.dimension:\n new_value = vector_1.coordinates[index] + vector_2.coordinates[index]\n new_coordinates.append(new_value)\n index += 1\n new_vector = Vector(new_coordinates)\n return new_vector", "def __add__(self, other):\n if isinstance(other, Vector):\n a = self._ar + other._ar\n else:\n a = self._ar + numpy.array(other)\n return Vector(a)", "def __add__(self, other):\n\t\tif len(self) != len(other):\n\t\t\traise ValueError('dimensions must agree')\n\t\tresult = Vector(len(self))\n\t\tfor j in range(len(self)):\n\t\t\tresult[j] = self[j] + other[j]\n\t\treturn result", "def __add__(self, other):\n if isinstance(other, (int, type(Zero()))):\n if (other == 0):\n return self\n self._check_vector(other)\n return Vector(self.args + other.args)", "def add(self, vector):\n self.x += vector.x\n self.y += vector.y", "def __add__(self, other):\n if len( self) != len(other):\n raise ValueError('Dimensions must match.')\n result = Vector(len(self))\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])", "def test__vector_addition__given_two_vector__return_correct_vector():\n assert Vector((0, 1, 2)) + Vector((3, 4, 5)) == Vector((3, 5, 7))", "def __add__(self, other):\n # other is a scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Vector([i + other for i in self.data], self.column)\n # other is a Vector\n elif isinstance(other, Vector):\n if len(self.data) != len(other):\n raise Exception('Vectors are not of equal length')\n elif self.column != other.column:\n raise Exception('Vectors are not of equal orientation')\n else:\n return Vector([self.data[i] + other.data[i] for i in range(len(self.data))], self.column)\n # other is not a scalar or a Vector\n else:\n raise Exception('Argument is not a number or a Vector') from TypeError", "def add(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item + w_item for v_item, w_item in zip(v, w)]", "def __add__(self, other):\n if len(self) != len(other):\n raise ValueError('As dimensões devem ser iguais')\n\n result = Vector(len(self)) # inicia um novo array do tamanho do próprio\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]", "def addAt(self, vector, id1, id2):\n 
self.matrix.update_add_at(vector,\n numerix.asarray(id1, dtype='int32'),\n numerix.asarray(id2, dtype='int32'))", "def add(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n if len(vec2) != len(self):\n raise DifferentLengthVectors(self, vec2)\n\n return Vector(*[self[i]+vec2[i] for i in range(len(self))])", "def __add__(self, other):\n\t\tself.__seqvector.vec += other.__seqvector.vec\n\t\treturn self", "def add_vectors(u, v): #11.22.5\r\n new_vector = []\r\n \"\"\"Because they have same length so we\r\n should take advantage from this one\"\"\"\r\n for i in range(len(u)):\r\n m = u[i] + v[i] # Get their value of i index at the same time!\r\n new_vector.append(m)\r\n return new_vector", "def __add__(self, v):\n return vector(self.x + v.x, self.y + v.y, self.z + v.z)", "def testSum(self):\n v1 = Vector(1, 2, 3)\n v2 = Vector(4, 5, 6)\n v1 += v2\n assert(len(v1) == 3)\n assert v1[0] == 5\n assert v1[1] == 7\n assert v1[2] == 9\n\n v1 = Vector(9, 8, 7)\n v2 = Vector(3, 2, 1)\n v1 -= v2\n assert len(v1) == 3\n assert v1[0] == 6\n assert v1[1] == 6\n assert v1[2] == 6", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]", "def __add__(self, v2):\n\t\treturn Vect2D(self._vec+v2._vec)", "def __add__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x+other.x, self.y+other.y, self.z+other.z, self.w+other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]", "def test_add_with_vec_argument(self):\n\n a = Vec3(2, 3, 4)\n b = Vec3(1, 2, 3)\n\n result = a + b\n\n expected_result = Vec3(3, 5, 7)\n\n self.assertEqual(result, expected_result)", "def test_iadd_with_vec_argument(self):\n\n a = Vec3(2, 3, 4)\n b = Vec3(1, 2, 3)\n\n a += b\n\n expected_result = Vec3(3, 5, 7)\n\n self.assertEqual(a, expected_result)", "def add_vectors(coord, vector):\n return tuple(c1+c2 for c1,c2 in zip(coord, vector))", "def add(self, a, b):\n return a + b", "def __add__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other)\n if not mv:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj + other\n newValue = self.value + other.value\n\n return self._newMV(newValue)", "def __iadd__(self, other):\n if isinstance(other, Seq2):\n if len(self) == len(other):\n self._vectors = [a + b for a, b in zip(self, other)]\n return self\n else:\n raise ValueError(\"cannot add arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n self._vectors = [a + b for a in self]\n return self", "def sum(cls, vectors):\n result = cls.null()\n for vector in vectors:\n result += vector\n return result", "def __add__(self, other: float) -> 'Translation':\n self._vector.setWithArray((self._vector.x + other, self._vector.y + other, self._vector.z + other))\n return self", "def add(self, keys: List[Tuple[int, int]], vectors: np.ndarray, weights: List[float], *args, **kwargs):\n pass", "def test_add(self):\n a = Vector(1, 2)\n b = Vector(3, 4)\n c = a + b\n assert c.x == 4\n assert c.y == 6", "def add(a, b):\n return np.array([x + y for x, y in zip(a, b)])", "def __add__(self, other):\n n = len(self)\n\n if n != len(other):\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i] + other[i]\n\n return v", "def add_vectorlist(vectors):\n x, y, z = zip(*vectors)\n return sum(x), sum(y), sum(z)", "def __add__(self, other):\n try:\n ox, oy = other\n 
except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (self[0] + ox, self[1] + oy))", "def _add_vectors(v1, v2):\n x = math.cos(v1[1]) * v1[0] + math.cos(v2[1]) * v2[0]\n y = math.sin(v1[1]) * v1[0] + math.sin(v2[1]) * v2[0]\n\n angle = 0.5 * math.pi - math.atan2(y, x)\n length = math.hypot(x, y)\n return (length, angle)", "def test_add_different_sizes():\n Vector(1.0) + Vector(2.0, 3.0)", "def __iadd__(self, other):\r\n if isinstance(other, vec4):\r\n self.x+=other.x\r\n self.y+=other.y\r\n self.z+=other.z\r\n self.w+=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for +=\"", "def test_add_with_vec_argument(self):\n\n from pedemath.vec3 import add_v3\n\n a = Vec3(2, 3, 4)\n b = Vec3(1, 2, 3)\n\n result = add_v3(a, b)\n\n expected_result = Vec3(3, 5, 7)\n\n self.assertEqual(result, expected_result)", "def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj", "def vector_sum(vectors):\n\tresult = vectors[0]\n\tfor vector in vectors:\n\t\tresult = vector_add(result, vector)\n\treturn result", "def __add__(self, other):\n if not isinstance(other, Matrix) or not self.equal_size(other):\n raise ValueError(\"Can only add two Matrix objects with same dimensions\")\n\n vectors = list()\n for i in range(self.m):\n v1 = self.vectors[i]\n v2 = other.vectors[i]\n vectors.insert(i, v1 + v2)\n return Matrix(vectors)", "def add(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],1.0]", "def add(self, x, y):\n pass", "def __add__(self, other):\n if isinstance(other, Seq2):\n if len(self) == len(other):\n return other.from_points(\n a + b for a, b in zip(self, other))\n else:\n raise ValueError(\"cannot add arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n return self.from_points(a + b for a in self)", "def __iadd__( self, vector3 ):\n return self.add( vector3 )", "def add( a, b ):\n return a + b", "def add(self,v2): \n n = len(self.a)\n m = len(v2.a)\n c = []\n if n != m:\n print(\"Incompatible Types\")\n return\n\n for i in range(n):\n c.append(self.a[i]+v2.a[i])\n\n return c", "def add(x, y):\n\n return x + y", "def vector_sum(vectors):\n results = vectors[0]\n for vector in vectors[1:]:\n results = vector_add(results, vector)\n return results", "def plus(self, that):\n\t\tif(self.d != that.d):\n\t\t\traise ValueError(\"Vector lengths disagree\")\n\t\tc = SparseVector(self.d)\n\t\tfor i in self.st.keys():\n\t\t\tc.put(i, self.get(i))\n\t\tfor i in that.st.keys():\n\t\t\tc.put(i, that.get(i) + c.get(i))\n\t\treturn c", "def add(first, second):\n return first + second", "def add(x,y):\n return x + y", "def add(x,y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(a, b):\n return [a[i] + b[i] for i in range(2)]", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add_vectors(v1,v2):\n \n #iterates through second dictionnary\n for key in v2:\n #if key is in v1 and v2 then we would add the 
values\n if key in v1:\n v1[key] = v1[key] +v2[key]\n #checks if the value at current key is 0\n if v1[key] == 0:\n # if value is 0 then we delete the key \n del v1[key]\n #if the key is not in v1 then we create a new key with the same value in v2\n elif key not in v1:\n v1[key] = v2[key]\n #checks if the value at current key is 0\n if v1[key] == 0:\n # if value is 0 then we delete the key \n del v1[key]", "def add(a,b):\n\treturn a+b", "def sum(self, vector):\n\n # return (self.from_list([x+vector.vector[self.vector.index(x)]\n # for x in self.vector]))\n return Vector(self.x + vector.x, self.y + vector.y, self.z + vector.z)", "def add(a,b):\r\n return a+b", "def addInPlace(self, value1, value2):\n raise NotImplementedError", "def __add__(self, other):\r\n return self.add(other)", "def add(a,b):\n return a + b", "def __add__(self, other):\n\n return self._binary_elementwise_op(other, np.add)", "def ADD (self, n1, n2):", "def add(a, b):\n return a+b", "def __add__(self, other):\n pass", "def __add__(self, other):\n pass", "def __add__(self, other):\n raise NotImplementedError" ]
[ "0.8628645", "0.8308469", "0.793092", "0.7919341", "0.7902262", "0.78230643", "0.7809441", "0.77952737", "0.77811813", "0.7755966", "0.775354", "0.7749087", "0.77490765", "0.7742617", "0.77359337", "0.7711612", "0.7693798", "0.7677173", "0.7629188", "0.75305533", "0.7523501", "0.74917877", "0.7477097", "0.74509805", "0.7441949", "0.74281836", "0.74063504", "0.73966074", "0.7386804", "0.73214924", "0.73086166", "0.72970325", "0.72932774", "0.72919524", "0.72657907", "0.7235182", "0.7214551", "0.71689034", "0.71665454", "0.7152891", "0.71271974", "0.71036047", "0.7086594", "0.7080451", "0.7071264", "0.7064831", "0.7060728", "0.70421225", "0.7027784", "0.7012241", "0.6969077", "0.6955338", "0.6954923", "0.6942892", "0.6932007", "0.69319516", "0.691562", "0.69034207", "0.6902353", "0.68863225", "0.6880707", "0.6873417", "0.6838394", "0.68266505", "0.68232954", "0.68129927", "0.68129927", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.6802587", "0.67843974", "0.67735374", "0.67735374", "0.67735374", "0.67735374", "0.67735374", "0.67735374", "0.6768191", "0.6755332", "0.67431784", "0.67399454", "0.6722833", "0.67209536", "0.6690611", "0.6676589", "0.6672509", "0.66653454", "0.66588604", "0.66588604", "0.66350895" ]
0.8094569
2
sub(Vector,Vector) subtracts second vector from first one
def sub(first, other):
    if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):
        return FreeCAD.Vector(first.x-other.x, first.y-other.y, first.z-other.z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vector_sub(v1,v2):\n return Vector(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z)", "def vec_sub(x, y):\r\n return [a - b for a, b in zip(x, y)]", "def vector_subtract(v1, v2):\n return v1[0] - v2[0], v1[1] - v2[1]", "def vector_substraction(a, b):\n return a[0] - b[0], a[1] - b[1]", "def vec_sub (x, y):\n return [x_i - y_i for (x_i, y_i) in zip (x, y)]", "def sub(self, a, b):\n return a - b", "def sub(a,b):\n return [a[0]-b[0],a[1]-b[1],a[2]-b[2],1.0]", "def __sub__(self, other):\n return Vec2d(self.v[0] - other[0], self.v[1] - other[1])", "def sub(a, b):\n return [a[i] - b[i] for i in range(2)]", "def sub(x, y):\n return x - y", "def __sub__(self, other):\n return Vector([c1 - c2 for (c1, c2) in zip(self.components, other.components)])", "def __sub__(self,other):\n return Vector(self.x - other.x, self.y-other.y)\n pass", "def sub(a, b):\n return a - b", "def sub(a, b):\n return a - b", "def sub(a, b):\n return a - b", "def _subVectors(X1,X2):\n _checkSize(X1,X2)\n return [ X1[i] - X2[i] for i in range(len(X1))]", "def sub(a,b):\r\n return a-b", "def subtractVector(self, subtrahend):\n result = self.addVector(subtrahend.scalarMultiplication(-1.0))\n return result", "def test__vector_subtraction__given_two_vectors__return_correct_vector():\n assert Vector((0, 1, 2)) - Vector((3, 4, 5)) == Vector((-3, -3, -3))", "def pairwise_sub(a, b):\n return [a[i]-b[i] for i in xrange(0, min(len(a), len(b)))]", "def subtract(first, second):\n return first - second", "def test_sub_with_vec_argument(self):\n\n a = Vec3(2, 4, 6)\n b = Vec3(1, 2, 3)\n\n result = a - b\n\n expected_result = Vec3(1, 2, 3)\n\n self.assertEqual(result, expected_result)", "def sub(o1, o2):\n return o1-o2", "def __sub__(self, other):\n if isinstance(other, Vector):\n a = self._ar - other._ar\n else:\n a = self._ar - numpy.array(other)\n return Vector(a)", "def substract(self, vector):\n\n # return (self.from_list([vector.vector[self.vector.index(x)]-x for x in\n # self.vector]))\n return Vector(self.x - vector.x, self.y - vector.y, self.z - vector.z)", "def subtract_vectors(vector_1, vector_2):\n new_coordinates = []\n index = 0\n while index < vector_1.dimension:\n new_value = vector_1.coordinates[index] - vector_2.coordinates[index]\n new_coordinates.append(new_value)\n index += 1\n new_vector = Vector(new_coordinates)\n return new_vector", "def __sub__(self, other):\n if isinstance(other, Vec2Array):\n if len(self) == len(other):\n return self.from_points(\n a - b for a, b in zip(self, other))\n else:\n raise ValueError(\n \"cannot subtract arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n return self.from_points(a - b for a in self)", "def sub(self,v2): \n n = len(self.a)\n m = len(v2.a)\n c = []\n if n != m:\n print(\"Incompatible Types\")\n return\n\n for i in range(n):\n c.append(self.a[i]-v2.a[i])\n\n return c", "def sub(x, y):\r\n\r\n return x + (-y)", "def subtract_vectors(u, v):\n return u[0] - v[0], u[1] - v[1], u[2] - v[2]", "def subtract(x, y):\n\n return x - y", "def subtract(a, b):\n return a - b", "def subtract(a, b):\n return a - b", "def test_subtraction__vector_vector(self):\n\n a1 = vectors.Vector(3, 2, 1)\n a2 = vectors.Vector(5, 6, 7)\n\n a3 = a1 - a2\n\n self.assertEqual(a3, vectors.Vector(-2, -4, -6))", "def sub4(a,b):\n return [a[0]-b[0],a[1]-b[1],a[2]-b[2],a[3]-b[3]]", "def subtract(x, y):\n return x - y", "def subtract(x, y):\n return x - y", "def subtract(x, y):\n return x - y", "def __sub__(self, other):\n return Point([c1 - c2 for (c1, 
c2) in zip(self, other)])", "def __sub__(self, other):\n n = len(self)\n\n if n != len(other):\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i] - other[i]\n\n return v", "def substract(x, y):\n return y - x", "def subtract(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n if len(vec2) != len(self):\n raise DifferentLengthVectors(self, vec2)\n\n return Vector(*[self[i]-vec2[i] for i in range(len(self))])", "def subtraction(a, b):\n return a - b", "def __sub__(self, other: float) -> 'Translation':\n self._vector.setWithArray((self._vector.x - other, self._vector.y - other, self._vector.z - other))\n return self", "def __sub__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"", "def subtraction(x, y):\n return x - y", "def v3minus(a, b):\n return [a[i] - b[i] for i, j in enumerate(a)]", "def test_isub_with_vec_argument(self):\n\n a = Vec3(2, 4, 6)\n b = Vec3(1, 2, 3)\n\n a -= b\n\n expected_result = Vec3(1, 2, 3)\n\n self.assertEqual(a, expected_result)", "def subtract(vector01,vector02): \r\n result = [[0] for row in range(len(vector01))]\r\n # Creates list full of 0s with the same lenght as vector01\r\n for z in range(len(vector01)):\r\n # for loop which continues as long as there are more elements in vector01 \r\n result[z] = vector01[z]-vector02[z]\r\n # the subtraction of each element which replaces the corresponding element in the vector full of 0s \r\n return result", "def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj", "def sub(self, other):\n return self._new_rep(self.rep - other.rep)", "def sub(n1, n2):\n return n1 - n2", "def sub(num1, num2):\n return num1 - num2", "def sub(num1, num2):\n return num1 - num2", "def sub(num1, num2):\n return num1 - num2", "def __sub__(self, other):\n return (self.x - other.x, self.y - other.y)", "def __sub__(self, other):\n try:\n ox, oy = other\n except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (self[0] - ox, self[1] - oy))", "def vector_between_points(a, b):\n vector_1 = Vector(*a)\n vector_2 = Vector(*b)\n return vector_1 - vector_2", "def __sub__(self, other):\n return Point(self.x - other[0], self.y - other[1])", "def getVector(c1, c2):\n return [c1[0] - c2[0], c1[1] - c2[1], c1[2] - c2[2]]", "def __isub__(self, other):\n if isinstance(other, Vec2Array):\n if len(self) == len(other):\n self._vectors = [a - b for a, b in zip(self, other)]\n return self\n else:\n raise ValueError(\n \"cannot subtract arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n self._vectors = [a - b for a in self]\n return self", "def test_sub_with_vec_argument(self):\n\n from pedemath.vec3 import sub_v3\n\n a = Vec3(2, 4, 6)\n b = Vec3(1, 2, 3)\n\n result = sub_v3(a, b)\n\n expected_result = Vec3(1, 2, 3)\n\n self.assertEqual(result, expected_result)", "def sub(x, y):\n\n z = [0 for i in range(len(x))]\n for i in range(len(x)):\n z[i] = z[i] + x[i]\n if (i < len(y)):\n z[i] -= y[i]\n if (z[i] < 0):\n z[i] += 10\n z[i + 1] -= 1\n while (len(z) > 1 and z[len(z) - 1] == 0):\n z.pop()\n return z", "def __rsub__(self, other):\n try:\n ox, oy = other\n except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (ox - self[0], oy - self[1]))", "def vector(p0, p1):\n a = p1[0] - 
p0[0]\n b = p1[1] - p0[1]\n return (a, b)", "def sub_vectors(d1,d2):\n #creates a new dictionnary to deep copy the inputs\n empty_dict = {}\n \n #creates a deep copy of d1 into an empty dictionnary\n for key in d1:\n empty_dict[key] = d1[key]\n #checks if any keys if a value of 0\n if empty_dict[key] == 0:\n #if so we would delete that key\n del empty_dict[key]\n \n #iterates through the second vector\n for key in d2:\n #checks if the current key in d2 is in d1\n if key in d1:\n #if so we would substract the value of d1 by d2\n empty_dict[key] = d1[key] - d2[key]\n #checks if the current key has the value of 0\n if empty_dict[key] == 0:\n #if so we would delete that key \n del empty_dict[key]\n \n elif key not in d1:\n #creates a new key in the new dictionnary if current key not found in d1\n empty_dict[key] = -d2[key]\n #checks if the current key has a value of 0\n if empty_dict[key] == 0:\n #deletes the current key has a value of 0\n del empty_dict[key]\n \n #returns new substracted dictionnary \n return empty_dict", "def __sub__(self, other):\n return self.subtract(other)", "def Sub(a, b):\n\tRequire(a>=b)\n\treturn a-b", "def sub(x, y) :\r\n z = y - x\r\n # The checker automatically proves z < 0 and z + x == y.\r\n return y", "def subtract(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item - w_item for v_item, w_item in zip(v, w)]", "def sub(self):\n a = self.pop()\n b = self.pop()\n c= b-a\n self.push(c)", "def sub(self, other):\n return Coord([self.x - other.x, self.y - other.y])", "def sub(A, B):\n A._check('-', B, A.shape, B.shape)\n return A.from_rep(A.rep.sub(B.rep))", "def sub_(self, vector: 'ModelParameters'):\n for idx in range(len(self)):\n self.parameters[idx] -= vector[idx]", "def __rsub__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other)\n if not mv:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return other - obj\n newValue = other.value - self.value\n\n return self._newMV(newValue)", "def test_subtract_different_sizes():\n Vector(1.0) - Vector(2.0, 3.0)", "def sub(a: Decimal, b: Decimal) -> Decimal:\n return a - b", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __sub__(self, other):\n return self.__add__(other * -1)", "def subtract(lhs, rhs):\n return _make.subtract(lhs, rhs)", "def __rsub__(self, other):\n if isinstance(other, Seq2) or isinstance(other, tuple):\n if len(self) == len(other):\n return other.from_points(\n b - a for a, b in zip(self, other))\n else:\n raise ValueError(\n \"cannot subtract arrays with different lengths\")\n return NotImplemented", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v,w)]", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v,w)]", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v,w)]", "def subtrai(x, y):\n assert isinstance(x, (int, float)), \"x precisa ser int ou float\"\n assert isinstance(y, (int, float)), \"y precisa ser int ou float\"\n return x - y", "def subtraction(self, first_value, second_value):\n return first_value - second_value", "def __sub__(self, other):\n if hasattr(other, '_d'):\n return (self.micros() - other.micros()) / 86400000000.0\n else:\n return self.__add__(-(other))", "def __sub__(self, other):\n return self.__add__(other.__neg__())", "def subtract(minuend, *values):\r\n result = minuend\r\n for value in values:\r\n result -= value\r\n return result", "def dist(first, other):\n if isinstance(first,FreeCAD.Vector) and 
isinstance(other,FreeCAD.Vector):\n return length(sub(first,other))", "def __sub__(self,other):\n return np.linalg.norm(self.ngdv-other.ngdv)", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v, w)]", "def sub(self, first, second):\n try:\n if isinstance(second, str):\n second = self._variables[second]\n self._variables[first] -= second\n except:\n print(f\"Could not subtract {first} - {second}\")", "def minus(self, a, b):\n return a - b", "def sub64(a,b):\n return(np.subtract(a, b, dtype=np.uint64))", "def __isub__(self, other):\n self.components = [c1 - c2 for (c1, c2) in zip(self, other)]\n return self", "def __sub__(self, other):\n n = self.size\n if n != other.size:\n raise ValueError(\"The permutations must be of equal size.\")\n a = self.inversion_vector()\n b = other.inversion_vector()\n result_inv = [(a[i] - b[i]) % (n - i) for i in xrange(n - 1)]\n return Permutation.from_inversion_vector(result_inv)", "def test_subtraction__vector_point(self):\n\n a1 = points.Point(3, 2, 1)\n a2 = vectors.Vector(5, 6, 7)\n\n a3 = a1 - a2\n\n self.assertEqual(a3, points.Point(-2, -4, -6))", "def subtract(*args):\n return args[0] - reduce(lambda x, y: x + y, args[1:])", "def difference_state(self, a: Vector, b: Vector, u: float, dt: float) -> Vector:\n return vectorops.mul(vectorops.sub(a,b),1.0/dt)" ]
[ "0.8428072", "0.8130589", "0.81168246", "0.80479693", "0.7928738", "0.7876339", "0.78062", "0.7775048", "0.77688444", "0.7703792", "0.7700997", "0.7661297", "0.76243055", "0.76243055", "0.76243055", "0.7616531", "0.7509781", "0.7507299", "0.7501865", "0.7447214", "0.7437817", "0.74181235", "0.7406448", "0.73655885", "0.73607874", "0.7325287", "0.73193175", "0.72883755", "0.72571737", "0.72237587", "0.7188426", "0.7182412", "0.7182412", "0.71642935", "0.71201515", "0.71058637", "0.71058637", "0.71058637", "0.7104372", "0.7099737", "0.70934916", "0.70803326", "0.7069137", "0.7058001", "0.70572203", "0.70445883", "0.70215625", "0.70065784", "0.6953177", "0.6948262", "0.6934183", "0.69286704", "0.69267094", "0.69267094", "0.69267094", "0.6924553", "0.69172776", "0.6893598", "0.68826884", "0.6879887", "0.6836595", "0.6800743", "0.67923313", "0.6790998", "0.67832017", "0.6770693", "0.67668307", "0.6766318", "0.67487717", "0.6746861", "0.6741012", "0.67374", "0.6728906", "0.6713437", "0.6710612", "0.6677837", "0.6653909", "0.66316944", "0.66316944", "0.6630257", "0.6629948", "0.66249764", "0.66249764", "0.66249764", "0.6623666", "0.66227454", "0.6621456", "0.66138613", "0.66079456", "0.66072613", "0.66040987", "0.6598265", "0.6596879", "0.65866137", "0.6570612", "0.6568992", "0.6557471", "0.65564895", "0.655602", "0.6551986" ]
0.8258668
1
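Illustrative note (not part of the dataset rows): a minimal, self-contained sketch of what the sub() helper in the entry above computes, rewritten over plain 3-tuples because FreeCAD.Vector is assumed to be unavailable outside a FreeCAD session; the name sub_xyz and the tuple representation are assumptions made for this sketch only.

def sub_xyz(first, other):
    # component-wise difference, mirroring the FreeCAD-style sub() above
    return (first[0] - other[0], first[1] - other[1], first[2] - other[2])

print(sub_xyz((3.0, 2.0, 1.0), (1.0, 1.0, 1.0)))  # -> (2.0, 1.0, 0.0)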
scale(Vector,Float) scales (multiplies) a vector by a factor
def scale(first,scalar):
    if isinstance(first,FreeCAD.Vector):
        return FreeCAD.Vector(first.x*scalar, first.y*scalar, first.z*scalar)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale_vector(vector, scale):\n return vector[0] * scale, vector[1] * scale, vector[2] * scale", "def scale_vector(vector, f):\n f = float(f)\n return [vector[0] * f, vector[1] * f, vector[2] * f]", "def scale(s: (float, int), v: Vector) -> Vector:\n coords = list()\n res = Vector(coords)\n for i in range(len(v.coords)):\n res.coords[i] *= s\n return res", "def apply_scale( vectors, scale ):\n # create a scaling matrix\n matrix = numpy.array([\n [ scale[ 0 ], 0.0, 0.0 ],\n [ 0.0, scale[ 1 ], 0.0 ],\n [ 0.0, 0.0, scale[ 2 ] ]\n ])\n return numpy.dot( vectors, matrix )", "def scale(v: float, a: float, b: float, c: float, d: float) -> float:\n v01 = (v - a) / (b - a)\n return c - v01 * (c - d)", "def scale(self,s):\n return Vector(self.x * s, self.y * s, self.z * s)", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def scale(self, factor):\n self.b = factor * self.b", "def _scale(x):\n scaleFactor = 1\n _ret = int(x/scaleFactor)\n return _ret", "def scale(self, factor):\n return BSplineFunc(self.kvs, self.coeffs * factor)", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def scale(self, value):\n return (float(value) - float(self.minimum)) / \\\n float(self.maximum - self.minimum) * 2.0 - 1.0", "def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res", "def scale_vectors(vectors, f):\n return [scale_vector(vector, f) for vector in vectors]", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]\n # pass", "def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)", "def scale(self, scale):\n\t\tself._current_score *= scale", "def scale(x, minimum, maximum):\n return (x - minimum) / (maximum - minimum)", "def __mul__(self, scale):\n return Vec(self.x * scale, self.y * scale)", "def scale(self, value):\n\t\toldscale = self.oldmax - self.oldmin\n\t\tnewscale = self.newmax - self.newmin\n\t\treturn (newscale * (value - self.oldmin) / oldscale) + self.newmin", "def scaling(mat, factor):\n\treturn mat / (mat + factor)", "def apply_scale( vertices, scale=1.0 ):\n checkVerticesValidity( vertices )\n if type(scale) != float:\n raise ValueError\n \n for i in range(len(vertices)):\n v = vertices[i]\n tmpv = [v[0]*scale, v[1]*scale, v[2]*scale]\n vertices[i] = tmpv", "def scale(data, factor):\n\n if np.ndim(data) != 2: # only process one IV dataset at a time\n raise IndexError('Incorrect data format')\n\n if np.size(data, 0) < np.size(data, 1):\n data = data.T # make sure data is in columns\n\n # match data types for float multiplication/division\n new_data = data.copy().astype(float)\n\n new_data[:, 1] *= factor\n\n return new_data", "def scale(c, scalar):\n return [c[0]*scalar, c[1]*scalar]", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]", "def scale(self, const):\n return Vector(*[self[i]*const for i in range(len(self))])", "def scale(self,factor):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y*factor for y in self.coord[x]])\n return self", "def scale(x, p=2, inplace=False):\n return x / np.linalg.norm(x, ord=p)", "def scale(val, src, dst):\r\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def scale(input):\n return (input - np.min(input)) / ((np.max(input) - np.min(input)))", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def scale(val, src, dst):\n return ((val - src[0]) / 
(src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def scale(self, s):\n for n in range(len(self.mV)):\n self.mV[n] *= s\n return self", "def unit_scale(x, eps=1e-8):\n\tx = x.copy()\n\tx -= x.min()\n\tx *= 1.0 / (x.max() + eps)\n\treturn x", "def scale(self, sf):\n self.scale(sf, sf)", "def scale(self, factor):\n new = self.copy()\n new.d.clear()\n\n for val, prob in self.items():\n new.set(val * factor, prob)\n return new", "def scale(self, scale_factor: float) -> None:\n self.tensor[:, :3] *= scale_factor", "def scale(self, scale_factor: Union[float, Tuple[float, float]]):\n\n if isinstance(scale_factor, float):\n self.x *= scale_factor\n self.y *= scale_factor\n self.width *= scale_factor\n self.height *= scale_factor\n\n elif isinstance(scale_factor, tuple):\n scale_x, scale_y = scale_factor\n self.x *= scale_x\n self.y *= scale_y\n self.width *= scale_x\n self.height *= scale_y", "def scale_point(point, centroid, scale):\n point = np.asarray(point)\n centroid = centroid[:2]\n vector = ((point - centroid)*scale) + centroid\n return vector", "def scale(requestContext, seriesList, factor):\n for series in seriesList:\n series.name = \"scale(%s,%g)\" % (series.name,float(factor))\n series.pathExpression = series.name\n for i,value in enumerate(series):\n series[i] = safeMul(value,factor)\n return seriesList", "def mults(self, s):\n prod = Vector.fromSequence(self.mV)\n prod.scale(s)\n return prod", "def scale(self, points, inplace=True):\n points = np.array(points).astype(float)\n if inplace==False:\n points = points.copy()\n # if len(points.shape) == 1:\n # points = points[None,:]\n # if len(points.shape) != 2:\n # logger.error(\"cannot scale array of dimensions\".format(len(points.shape)))\n points -= self.origin\n points /= self.scale_factor\n return points", "def scale(self, factors):\n if isinstance(factors, numbers.Number):\n factors = np.ones(self.dim) * factors;\n self.raw_wires.scale(factors);", "def scale(self, up):\n s = 1.1 if up else 0.9\n self.scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([s, s, s])\n )\n\n self.aabb.scale(s)", "def scale(self):", "def scale(x, a=5, b=10, xmin=-1, xmax=1):\n return (b - a)*(x - xmin)/(xmax - xmin) + a", "def scale(self, x, y, z) -> None:\n ...", "def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)", "def scale(self, scale_factor: Union[float, Tuple[float, float]]):\n\n if isinstance(scale_factor, float):\n self.width *= scale_factor\n self.height *= scale_factor\n elif isinstance(scale_factor, tuple):\n scale_x, scale_y = scale_factor\n self.width *= scale_x\n self.height *= scale_y", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)", "def scale(self, factor: float) -> Point:\n return Point(self.x * factor, self.y * factor)", "def scale(v: InputTensor) -> t.Tensor:\n v = util.to_tensor(v, dtype=t.float32)\n assert len(v.shape) == 1\n return t.diag(t.cat([v, v.new_ones([1])], dim=0))", "def scalepos(pos):\n return pos[0] * scalefactor, pos[1] * scalefactor", "def scale(x, feature_range=(-1,1)):\r\n x = x * 2 - 1\r\n return x", "def scale(self, scale_x: float, scale_y: float) -> None:\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y", "def normalize(x):\n a = 0\n b = 1\n scale_min = 0\n scale_max = 255\n return a + ( ( (x - scale_min)*(b - a) )/( scale_max - scale_min ) )", "def scale(self, other):\n return Vector(other * self.x, other * self.y)", "def scale(self):\n return 
self.scale_factor / CONSTANTS.AU", "def vec_scale (x, alpha):\n return [x_i*alpha for x_i in x]", "def Scale(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Scale(*args, **kwargs)", "def scale(image, maxval=1024):\n image += maxval # minimum value is now 0\n image /= maxval*2\n\n return(image)", "def _scale(x, axis=None):\n x = _remove_baseline(x, axis=axis)\n x /= np.std(x, ddof=1, axis=axis, keepdims=True)\n return x", "def scale_uniform(self, s: float):\n self.vertices = [v * s for v in self.vertices]\n return self", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def scale(self,\n factor_x: Scalar,\n factor_y: Optional[Scalar] = None) -> 'Multipoint[Scalar]':\n return self._context.scale_multipoint(\n self, factor_x, factor_x if factor_y is None else factor_y\n )", "def scale_factor(self, z = 0.):\n return 1./(1.+z)", "def scale(val, src, dst):\n try:\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]\n except ZeroDivisionError:\n return 0.0", "def scale(f, a, j=0):\n return f.per(dmp_scale_in(f.rep, f.dom.convert(a), j, f.lev, f.dom))", "def xscale(value):\n impl.xscale(**locals())", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def scale(self, factor):\n self.x *= factor\n self.y *= factor\n for a in self.annotations:\n a.scale(factor)", "def _call_scaleAdd(vecObj, vec2, sc1, sc2):\n res = vecObj.scaleAdd(vec2, sc1, sc2)\n return res", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "def scale(self, factor):\n for a in self.symbol_attributes:\n a.scale(factor)", "def setScaling(factor=1.0):\n dislin.sclfac(factor)", "def scaleProcess(process,scale):\n #print '>>> scaleProcess(\"%s\",%.3f):'%(process.process(),scale)\n #print \">>> rate before = %s\"%(process.rate())\n process.set_rate(process.rate()*scale)\n #print \">>> rate after = %s\"%(process.rate())", "def _scale_param(self, resid_us):\n return((resid_us**2).sum().sum() / self.dof)", "def __scale_constraint(c, v):\n if c.equality:\n c.set_value((c.lower * v, c.body * v))\n else:\n c.set_value(\n (__none_left_mult(c.lower, v), c.body * v, __none_left_mult(c.upper, v))\n )", "def scale(c,v,p):\n scaleval = min([coeff.valuation(p) for coeff in c.coefficients()])\n if scaleval > 0:\n c = c/(p**scaleval)\n v = v - scaleval\n if v <= 0:\n flag = False\n else:\n flag = True\n return [flag,c,v]", "def test_vec3_scale(self):\n\n vec = Vec3(1, 2, 5)\n\n vec.scale(2)\n\n self.assertEqual(Vec3(2, 4, 10), vec)", "def scale(self, scale_factor: Union[float, Tuple[float, float]]):\n self.page.scale(scale_factor)\n for token in self.tokens:\n token.scale(scale_factor)", "def setScale(self, *args):\n return _libsbml.Unit_setScale(self, *args)", "def scaleValues(values):\n\n values = values - values.min()\n return values/values.max()", "def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", "def scale(self, sval: complex) -> None:\n self.coeff = self.coeff.astype(numpy.complex128) * sval", "def scale(self, alpha):\n\t\tc = SparseVector(self.d)\n\t\tfor i in self.st.keys():\n\t\t\tc.put(i, alpha*self.get(i))\n\t\treturn c", "def compute_scale(self, box, plane):\n center, normal = plane\n vertex_dots = [np.dot(vertex, normal) for vertex in box[1:]]\n vertex_dots = np.sort(vertex_dots)\n center_dot = np.dot(center, normal)\n scales = center_dot / vertex_dots[:4]\n return np.mean(scales)", "def scale_mag_1(x):\n return np.array([np.true_divide(ui, mag(x)) for ui in x])", 
"def _scale(self, normalize, mat):\n mat = mat.astype(float)\n if normalize:\n mat = sklearn_norm(mat,\n feature_range=(0, 1),\n axis=0,\n copy=True)\n else:\n return mat\n return mat", "def scale(self,n,d=1):\r\n\t\t\r\n\t\t# scale all terms\r\n\t\ts = [i.scale(n,d) for i in self]\r\n\t\t\r\n\t\treturn Li(s)", "def scale(self, scale=1):\n self.x *= scale\n self.y *= scale\n self.width *= scale\n self.height *= scale\n\n # Always update the corners after operation\n self.update_corners()\n return", "def scalarMultiplication(self, factor):\n components = self.components() * factor\n return Vector.initializeFromComponents(components)", "def scale(x, feature_range=(-1, 1)):\n \n # scale from 0-1 to feature_range\n min, max = feature_range\n #x = x * (max - min) + min\n #x = torch.add(torch.mul(x, (max-min)), min)\n x = x.mul(max-min).add_(min)\n return x", "def scale(self, column_name, factor):\n self.check_for_column(column_name)\n self.data[column_name] *= factor", "def Scale(*args, **kwargs):\n return _gdi_.GraphicsContext_Scale(*args, **kwargs)", "def scale(curve):\n return curve/rmsd(curve)", "def apply_direction_scale( vectors, direction, scale ):\n \"\"\"\n scaling is defined as:\n \n [p'][1 + (k - 1)n.x^2, (k - 1)n.x n.y^2, (k - 1)n.x n.z ]\n S(n,k) = [q'][(k - 1)n.x n.y, 1 + (k - 1)n.y, (k - 1)n.y n.z ]\n [r'][(k - 1)n.x n.z, (k - 1)n.y n.z, 1 + (k - 1)n.z^2 ]\n \n where:\n v' is the resulting vector after scaling\n v is the vector to scale\n n is the direction of the scaling\n n.x is the x component of n\n n.y is the y component of n\n n.z is the z component of n\n k is the scaling factor\n \"\"\"\n scaleMinus1 = scale - 1\n matrix = numpy.array(\n [\n # m1\n [\n # m11 = 1 + (k - 1)n.x^2\n 1 + scaleMinus1 * (direction[ 0 ]**2),\n # m12 = (k - 1)n.x n.y^2\n scaleMinus1 * direction[ 0 ] * direction[ 1 ]**2,\n # m13 = (k - 1)n.x n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ]\n ],\n # m2\n [\n # m21 = (k - 1)n.x n.y\n scaleMinus1 * direction[ 0 ] * direction[ 1 ],\n # m22 = 1 + (k - 1)n.y\n 1 + scaleMinus1 * direction[ 1 ],\n # m23 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ]\n ],\n # m3\n [\n # m31 = (k - 1)n.x n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ],\n # m32 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ],\n # m33 = 1 + (k - 1)n.z^2\n 1 + scaleMinus1 * direction[ 2 ]**2\n ]\n ],\n dtype = numpy.float\n )\n \n return numpy.dot( vectors, matrix )", "def _scale(waveform):\n # Get random scale factor\n scale_factor = tf.random_uniform(shape=[], minval=0.5, maxval=2.5, dtype=tf.float32)\n\n return waveform * scale_factor" ]
[ "0.8370318", "0.7960726", "0.7441456", "0.7425613", "0.7421588", "0.74008507", "0.7352211", "0.7281855", "0.725304", "0.72357947", "0.7201499", "0.7179996", "0.7178041", "0.7161515", "0.713151", "0.71268904", "0.70673966", "0.6991172", "0.6965906", "0.69597846", "0.6956664", "0.6940259", "0.68966645", "0.687821", "0.6856291", "0.68413806", "0.6806297", "0.6785435", "0.677538", "0.6757675", "0.67501706", "0.67443067", "0.67443067", "0.6731822", "0.6730544", "0.6680853", "0.6670294", "0.66616553", "0.6653676", "0.66206634", "0.66182226", "0.6615427", "0.65889573", "0.6580919", "0.6580363", "0.65582705", "0.65360653", "0.6533207", "0.65304524", "0.65184164", "0.65084684", "0.6483519", "0.64422417", "0.64313054", "0.6428779", "0.64287686", "0.6427942", "0.64222866", "0.63973147", "0.6386004", "0.6385547", "0.63749003", "0.637265", "0.63638973", "0.63502026", "0.6336553", "0.6335688", "0.63319385", "0.6327286", "0.63195544", "0.6297488", "0.62792116", "0.62703604", "0.6262732", "0.6250745", "0.6240813", "0.6236999", "0.6232596", "0.6230739", "0.6224972", "0.6219502", "0.62180823", "0.6205246", "0.6202908", "0.61995", "0.61967725", "0.6196631", "0.6188822", "0.61673033", "0.61649925", "0.6147735", "0.61322623", "0.61188734", "0.6118199", "0.6109491", "0.61075294", "0.60970956", "0.6095719", "0.6093986", "0.60893077" ]
0.7347928
7
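Illustrative note (not part of the dataset rows): a minimal sketch of the scale() helper in the entry above on plain 3-tuples; scale_xyz and the tuple representation are assumptions for this sketch, since FreeCAD.Vector may not be installed here.

def scale_xyz(first, scalar):
    # multiply every component by the factor, mirroring the FreeCAD-style scale() above
    return (first[0] * scalar, first[1] * scalar, first[2] * scalar)

print(scale_xyz((1.0, -2.0, 0.5), 2.0))  # -> (2.0, -4.0, 1.0)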
length(Vector) gives vector length
def length(first):
    if isinstance(first,FreeCAD.Vector):
        return math.sqrt(first.x*first.x + first.y*first.y + first.z*first.z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vec_len(x):\r\n \r\n length = math.sqrt(x[0]**2 + x[1]**2)\r\n return length", "def _get_vector_size(self):\n if len(self):\n return len(self.values()[0])\n else:\n return 0", "def vector_len( vector ):\n \n if not isinstance(vector, np.ndarray ):\n return len(vector)\n else:\n shape = vector.shape # shape is a tuple\n \n sl = len(shape)\n if sl == 0:\n return 0\n elif sl == 1:\n return shape[0]\n else: \n non_one_dims = [ s for s in shape if s > 1 ]\n non_one_dims_len = len(non_one_dims)\n if non_one_dims_len > 1:\n raise ValueError(\"Function vector_len: Not a vector provided, shape : %s\", shape)\n elif non_one_dims_len == 0:\n return 1\n else:\n return non_one_dims[0]", "def length(vec):\n\n return math.sqrt(dotproduct(vec, vec))", "def length(vector):\n a, b, c = vector\n return math.sqrt(a ** 2 + b ** 2 + c ** 2)", "def get_vector_length(vector):\n return np.linalg.norm(vector)", "def length(vec):\n return vec.dot(vec)**.5", "def length(v):\n return math.sqrt(v[0]**2 + v[1]**2)", "def length(vec):\n return np.linalg.norm(vec)", "def __len__(self):\n return self._vector.degree()", "def get_vector_length(v):\r\n v = as_tensor_variable(v)\r\n if v.ndim != 1:\r\n raise TypeError('argument must be symbolic vector')\r\n if v.type.broadcastable[0]:\r\n return 1\r\n if isinstance(v, gof.Constant) and v.type.ndim == 1:\r\n return len(v.data)\r\n if v.owner and isinstance(v.owner.op, theano.tensor.opt.MakeVector):\r\n return len(v.owner.inputs)\r\n if v.owner and isinstance(v.owner.op, Shape):\r\n return v.owner.inputs[0].type.ndim\r\n raise ValueError(\"length not known\")", "def len(self):\n return math.sqrt(self.v[0] * self.v[0] + self.v[1] * self.v[1])", "def __len__(self):\n return len(self._representation_vector)", "def veclength(vec):\n vec = np.array(vec, copy=False).reshape(-1, 3)\n return np.sqrt(np.einsum('ij,ij->i', vec, vec))", "def length_vector(v):\n return sqrt(dot_vectors(v, v))", "def __len__(self):\n return len(self.centroid_vector)", "def Length(self) -> int:", "def Length(self) -> int:", "def getLength(self):\n return self.vector.norm", "def numel(self):\n return self.t.size", "def length(self):\n return len(self.x)", "def length(self):\n length = 0\n a = self.array_form\n for i in xrange(len(a)):\n if a[i] != i:\n length += 1\n return length", "def length(self):\n return pyvista.Box(self.bounds).length", "def nvar(self):\n return len(self.v)", "def __len__():", "def __len__():", "def __len__():", "def __len__(self):\n # TODO: Properly account for indices which can't be used, as in\n # random_batch's check.\n return max(0, self.size - self.phi_length)", "def dim_from_concatenated_vector(v):\n return int(np.sqrt(v.shape[0] - 1))", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def length(self):\n pass", "def length(self):\n\t\treturn self.n", "def __len__(self):\n return self.last - self.first + 1", "def Length(data):\n return len(data)", "def __len__(self):\n return self.lengths[0]", "def size(self) -> int:", "def __len__(self):\n return self.__length", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def length(self):\n if self.is_null():\n return 0\n return 
self.end - self.begin", "def __len__(self):\n return len(self[0]) + len(self[1])", "def actual_size(v=(0, 0)):\n return v[0], v[1]", "def __len__(self):\n return self.end - self.begin", "def get_length(array):\n return len(list(array))", "def total_length():\n return", "def __nonzero__(self):\n return _uhd_swig.uhd_size_vector_t___nonzero__(self)", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def length(self):\n ...", "def __len__(self) -> int:\n return self._length", "def __len__(self) -> int:\n return len(self.length)", "def __len__(self) -> int:\n return len(self.value)", "def length(self): # Class O(n)\r\n h = self.head\r\n size = 1\r\n while 'next' in dir(h.next):\r\n size += 1\r\n h = h.next\r\n return size", "def __len__(self):\n return np.size(self.A,0)", "def _call_size(vecObj):\n res = vecObj.size\n return res", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def length(v: Vec2) -> float:\n return sqrt(v.x ** 2 + v.y ** 2)", "def __len__(self):\n return self.llSize", "def length(self):\n return _lattice.length(self._accelerator.lattice)", "def flen(v):\n\treturn value_bits_sign(v)[0]", "def __len__(self):\n return self._length # pylint: disable = E1101", "def __len__(self):\n if self.__indices__ is not None:\n return len(self.__indices__)\n return self.len()", "def length(self):\n return self.linked_list.length()", "def __len__(self):\n if not hasattr(self, 'hdu_list'):\n self.update_hdu_list()\n return len(self.hdu_list)", "def __len__(self):\n\n return self.length", "def __len__(self) -> float:\n return len(self.elements)", "def length(self):\n return self.count", "def __len__(self) -> int:\n return self.length", "def size(self):\r\n return self.__length", "def __len__(self):\n # TODO: complete this function!\n if self.is_empty():\n return 0\n else:\n return 1 + len(self._rest)", "def len(x) -> int:\n pass", "def size(A):\n\treturn (len(A[0]),len(A))", "def v_size(self) -> int:\n return len(self.Nodes)", "def __len__(self) -> int:\n return self.disp_size ** 2", "def DLEN(self):", "def length(self) -> int:\n pass", "def length(self):\n return self.heap.size()", "def __len__(self):\n return(len(self.__d))", "def length(self):\n return self.__length", "def length(self):\n return self.__length", "def __len__(self): # pragma: no cover\n return self.size()", "def calculate_length(self):\n raise NotImplementedError", "def __len__(self):\n return self._length", "def vectorLength(v1, v2=None):\n if v2 is None:\n v2 = v1\n return math.sqrt(dotProduct(v1, v2))", "def length(self):\n return self.length", "def __len__(self) -> int:\n return self._len", "def __len__(self) -> int:\n return len(self.variables)", "def V_length(atoms):\n \n Vl = 0 # this is the variable we will store the sum of all the energies in\n N = len(atoms)\n for i in range(N):\n j = (i+1) % N\n length = norm(atoms.coords[i] - atoms.coords[j]) # norm computes the length of a vector\n \n Vl += (length - L0)**2\n \n return Vl", "def len2(x):\n \n if hasattr(x, '__len__'):\n \n length = len(x)\n \n elif isinstance(x, (int,float,long,complex)):\n \n length = 1\n \n return length", "def __len__(self):\n return sum(self.size_freqs.values())", "def length(self):\n return Integer(len(self._g))", "def size(self):\r\n # Anthony stage 2\r\n return number_size(self.n) - 1", "def 
length(value):\n\n # Try to return the length\n return len(value)" ]
[ "0.76649845", "0.7522427", "0.7505321", "0.74064803", "0.73988277", "0.7376112", "0.7350648", "0.7289443", "0.7279308", "0.72593445", "0.7243668", "0.71736383", "0.70988554", "0.70317763", "0.69320846", "0.69186604", "0.69020325", "0.69020325", "0.6873299", "0.6854931", "0.68184966", "0.6776031", "0.6750503", "0.6731353", "0.6715451", "0.6715451", "0.6715451", "0.6714928", "0.6705839", "0.66948235", "0.66948235", "0.66948235", "0.6660843", "0.66504824", "0.6610653", "0.6601827", "0.6599104", "0.6593797", "0.6588523", "0.6576923", "0.6576923", "0.6576923", "0.655928", "0.6558353", "0.6555238", "0.6547774", "0.65291584", "0.6525685", "0.6523613", "0.6519338", "0.6503413", "0.6501715", "0.65000397", "0.6490832", "0.6477268", "0.64728004", "0.6472131", "0.6469101", "0.6469101", "0.6469101", "0.6469101", "0.6469101", "0.6469101", "0.64671314", "0.6461271", "0.64532745", "0.645268", "0.64460206", "0.6442745", "0.64416534", "0.6433089", "0.64219683", "0.6415252", "0.64108145", "0.64060307", "0.6395306", "0.63915145", "0.6389657", "0.6389212", "0.63889265", "0.63875365", "0.63837564", "0.63821393", "0.6377301", "0.63750327", "0.63740116", "0.63740116", "0.6364323", "0.63615113", "0.6360582", "0.63598025", "0.635869", "0.63571024", "0.6352167", "0.63509035", "0.6345908", "0.6345727", "0.63439924", "0.63409394", "0.6340614" ]
0.71695125
12
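Illustrative note (not part of the dataset rows): the length() helper in the entry above returns the Euclidean norm sqrt(x*x + y*y + z*z); a plain-Python sketch follows (length_xyz and the tuple representation are assumptions for illustration only).

import math

def length_xyz(first):
    # Euclidean length of a 3-tuple, mirroring the FreeCAD-style length() above
    return math.sqrt(first[0] * first[0] + first[1] * first[1] + first[2] * first[2])

print(length_xyz((3.0, 4.0, 0.0)))  # -> 5.0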
dist(Vector,Vector) returns the distance between both points/vectors
def dist(first, other):
    if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):
        return length(sub(first,other))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))", "def dist(p1,p2):\n\n return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2", "def dist(v1: vect2d, v2: vect2d) -> float:\n d = ((v2.x - v1.x)**2 + (v2.y - v1.y)**2) ** 0.5\n return d", "def dist(v1, v2):\n return ( (v1[0] - v2[0])**2 + (v1[1] - v2[1])**2 )**0.5", "def dist(pos1, pos2):\n a, b = pos1\n c, d = pos2\n \n return sqrt((a-c)**2 + (b-d)**2)", "def dist(a, b):\n return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))", "def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))", "def dist(p0, p1):\n return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)", "def distance(p1, p2):\n return np.linalg.norm(np.array(p1) - np.array(p2))", "def dist(pt1, pt2):\n return np.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2)", "def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)", "def distance(p1, p2):\n return np.linalg.norm(p2-p1)", "def distance(p1, p2):\n return math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)", "def distance(p1, p2):\n return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)", "def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)", "def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)", "def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def dist(a, b):\n return np.sum((a-b)**2.0)**.5", "def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2", "def distance(p1, p2):\n return math.sqrt((math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2)))", "def distance(pos1, pos2):\n return math.sqrt((pos1[0] - pos2[0])**2. 
+ (pos1[1] - pos2[1])**2.)", "def dist_points(x,y):\n\n return abs(x[0]-y[0]) + abs(x[1]-y[1])", "def distance(a, b):\n return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)", "def dist2D(a, b):\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def dist(pnt1, pnt2):\n return ((pnt2[0] - pnt1[0])**2 + (pnt2[1] - pnt1[1])**2 + (pnt2[2] - pnt1[2])**2)**0.5", "def distance(P1, P2):\n return ((P1[0] - P2[0])**2 + (P1[1] - P2[1])**2) ** 0.5", "def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))", "def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))", "def distance(p1, p2):\r\n return math.hypot(p1[0] - p2[0], p1[1] - p2[1])", "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5", "def distance(p0, p1):\n return( numpy.sqrt( (p0[0]-p1[0])**2 + \n (p0[1]-p1[1])**2 + \n (p0[2]-p1[2])**2 ) )", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def distance(p1, p2):\n return math.hypot(p2[0] - p1[0], p2[1] - p1[1])", "def distance(a, b):\n return (np.sum((a - b)**2))**0.5", "def distance(self, vector1, vector2):\n\t\tsum_sq = 0\n\t\tfor i in range(28):\n\t\t\tfor j in range(28):\n\t\t\t\tsum_sq += (vector1[i][j] - vector2[i][j])**2\n\t\treturn math.sqrt(sum_sq)", "def dist_2D(v1, v2):\n return ((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2 )**(0.5)", "def distance(x,y):\n return np.sqrt( np.power(np.array(x) - np.array(y), 2).sum() )", "def distance(a, b):\n dx = a[0] - b[0]\n dy = a[1] - b[1]\n\n return math.sqrt(dx*dx + dy*dy)", "def distance(a, b):\n return math.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)", "def distance(self, p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def distance(v1, v2):\r\n return magnitude(*subtract(v2, v1))", "def distance(p1, p2):\n return math.hypot(p1.x-p2.x, p1.y-p2.y)", "def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))", "def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))", "def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )", "def GetDist(feature_1, feature_2):\n return np.linalg.norm(feature_1 - feature_2)", "def distance_point_point(a, b):\n ab = subtract_vectors(b, a)\n return length_vector(ab)", "def GetDistance(vec1,vec2):\n diff = np.asarray(vec1) - np.asarray(vec2)\n squareDistance = np.dot(diff.T, diff)\n return math.sqrt(squareDistance)", "def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)", "def compute_dist(p_1, p_2):\n return sqrt((p_2[0] - p_1[0])**2 + (p_2[1] - p_1[1])**2 +\n (p_2[2] - p_1[2])**2)", "def dist(a, b):\n x0, y0 = a # Destructuring assignment\n x1, y1 = b\n\n return math.sqrt((x1 - x0)**2 + (y1 - y0)**2)", "def dist(self, point_a, point_b):\n args = {\"point_a\": point_a, \"point_b\": point_b}\n dists = gs.array(self._iterate_over_factors(\"dist\", args))\n return gs.linalg.norm(dists, ord=2, axis=0)", "def distance(point1, point2):\n return math.sqrt(math.pow((point1[0] - point2[0]), 2) +\n math.pow(point1[1] - point2[1], 2))", "def distance(self, a, b):\n raise NotImplementedError()", "def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = 
point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def _get_dist(self, p1, p2): \r\n\r\n distance = np.sqrt(\r\n (p1[0] - p2[0]) ** 2 +\r\n (p1[1] - p2[1]) ** 2 +\r\n (p1[2] - p2[2]) ** 2)\r\n\r\n return distance", "def euclidean_distance(vector1, vector2):\n e_dist = [(v1 - v2) ** 2 for v1, v2 in zip(vector1, vector2)]\n e_dist = math.sqrt(sum(e_dist))\n return e_dist", "def distance(p1,p2):\r\n x1,y1 = p1\r\n x2,y2 = p2\r\n return hypot(x2 - x1, y2 - y1)", "def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)", "def distance_between_points(p1,p2):\n return math.sqrt((p2.x-p1.x)**2+(p2.y-p1.y)**2)", "def _pairwise_dist(self,s1,s2):\n\n return 0.0", "def distance(point1, point2):\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5", "def distance_between_points(a: Point, b: Point) -> float:\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)", "def compute_euclidean_dist(vec1, vec2):\r\n assert len(vec1) == len(vec2)\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.sqrt(np.sum(np.square(vec2 - vec1)))", "def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)", "def distance(p1,p2):\n x1,y1 = p1\n x2,y2 = p2\n return hypot(x2 - x1, y2 - y1)", "def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)", "def distance(x1, y1, x2, y2):\n dist = ((x1-x2)**2 + (y1-y2)**2)**0.5\n return dist", "def distance(x, y):\n dist = [pow((x-y), 2) for x, y in zip(x,y)]\n dist = math.sqrt(sum(dist))\n \n return dist", "def distance(a, b):\n if len(a) > len(b):\n a = a[:len(b)]\n elif len(b) > len(a):\n b = b[:len(a)]\n\n ar = numpy.array(a)\n br = numpy.array(b)\n dist = numpy.linalg.norm(ar-br)\n\n return dist", "def dist(self,x, y):\n\n x1, y1 = x\n x2, y2 = y\n return np.sqrt(pow((x1 - x2), 2) + pow((y1 - y2), 2))", "def test_vector_dist(self):\r\n v1 = [1, 4, 2]\r\n v2 = [-1, 12, 4]\r\n\r\n exp = 8.48528137424\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)\r\n\r\n v1 = [1, 2, 100, 4, 2]\r\n v2 = [-1, 12, 4, 12, 99]\r\n\r\n exp = 137.087563258\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)", "def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)", "def distance(A, B):\n return abs(A - B)", "def distance(A, B):\n return abs(A - B)", "def hellinger_dist(v1, v2):\n if len(v1) != len(v2):\n raise ValueError(\"Vectors should have the same size! 
\")\n return sqrt( sum( map(lambda e: \n (sqrt(e[0])-sqrt(e[1]))**2, zip(v1,v2))))/sqrt(2)", "def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)", "def getDistance(p1, p2):\n\tdist = la.norm(p2 - p1)\n\treturn dist", "def dist(self, other: \"Vector\", sqr=False) -> float: #distance between 2 vectors\n if sqr:\n return (self-other).sqr_mag()\n return (self-other).mag()", "def dist(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n return (self - vec2).magnitude()", "def _dist(A, B):\n return np.sqrt(np.einsum(\"ijk->ij\", (A[:, None, :] - B) ** 2))", "def distance(p1, p2):\n\n \"\"\"\n (p1[0] - p2[0]) ** 2 + \n (p1[1] - p2[1]) ** 2 + \n \"\"\"\n sum_all = 0\n for i, v in enumerate(p1):\n diff_squared = (v - p2[i]) ** 2\n sum_all += diff_squared\n return(math.sqrt(sum_all))", "def _dist(x, y):\n return np.sqrt(np.mean(np.square(x - y)))", "def distance(x1, y1, x2, y2):\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5", "def get_distance(descriptive_vector1, descriptive_vector2 ):\n return np.linalg.norm(descriptive_vector1 - descriptive_vector2)", "def distance(d1, d2):\n projection_onto_plane = d2 - projection(d1, d2)\n dist = np.linalg.norm(projection_onto_plane)\n\n return dist", "def getDistance(point1,point2):\n dx = point2[0]-point1[0]\n dy = point2[1]-point1[1]\n return math.sqrt(dy*dy + dx*dx)", "def __dist(u, v):\n return spatial.distance.euclidean(u, v)", "def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)", "def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)", "def dist(x1, x2, distance):\n if distance == 'l2':\n return np.sqrt(np.sum(np.square(x1 - x2)))\n elif distance == 'squared_l2':\n return np.sum(np.square(x1 - x2))\n else:\n raise Exception(\"The distance '%s' is not supported.\" % distance)", "def distance(self,x,y,**kwargs):\n pass", "def distance(coords1, coords2):\n dx = coords1.x - coords2.x\n dy = coords1.y - coords2.y\n return math.sqrt(dx * dx + dy * dy)", "def distance_between_two_points(p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def distance(p1, p2):\n distance = 0\n for i in range(len(p1)):\n distance += (p1[i]-p2[i])**2\n return distance" ]
[ "0.8330045", "0.7836225", "0.7793538", "0.7769077", "0.7764226", "0.76751816", "0.7628056", "0.7599897", "0.758881", "0.75808233", "0.75630975", "0.75542706", "0.7522357", "0.7487544", "0.7484254", "0.74748534", "0.74724805", "0.7463907", "0.74554265", "0.7436666", "0.7422981", "0.74204326", "0.7411742", "0.74073046", "0.7404597", "0.7399842", "0.739885", "0.73905456", "0.7377987", "0.736897", "0.7355408", "0.7351707", "0.7329268", "0.7327916", "0.7326522", "0.7322476", "0.73012257", "0.7300572", "0.7295958", "0.7291975", "0.72910976", "0.7279496", "0.7270636", "0.725602", "0.7245564", "0.7244271", "0.7244271", "0.72384554", "0.7235725", "0.7233232", "0.72215974", "0.72195876", "0.72108084", "0.72000754", "0.71984047", "0.71974146", "0.71797854", "0.7177501", "0.71707207", "0.71707207", "0.71681315", "0.7155018", "0.7153652", "0.71499664", "0.7144418", "0.71398777", "0.7138165", "0.7120466", "0.71184987", "0.7114603", "0.7113956", "0.71087897", "0.70887375", "0.70733404", "0.70724386", "0.7069487", "0.7065681", "0.7062884", "0.7053119", "0.7053119", "0.70517135", "0.7049879", "0.7048717", "0.7038356", "0.7035891", "0.70102143", "0.7003118", "0.6992286", "0.6990667", "0.6985516", "0.6984684", "0.69733685", "0.69710904", "0.6965767", "0.69588804", "0.6956042", "0.6952838", "0.6952751", "0.69490355", "0.6940752" ]
0.77351147
5
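Illustrative note (not part of the dataset rows): dist() in the entry above is simply length(sub(first, other)), i.e. the Euclidean norm of the component-wise difference; a plain-Python sketch under the same tuple assumption as the earlier notes (dist_xyz is a hypothetical name).

import math

def dist_xyz(first, other):
    # distance between two 3-tuples, mirroring the FreeCAD-style dist() = length(sub(first, other))
    dx, dy, dz = first[0] - other[0], first[1] - other[1], first[2] - other[2]
    return math.sqrt(dx * dx + dy * dy + dz * dz)

print(dist_xyz((1.0, 2.0, 2.0), (0.0, 0.0, 0.0)))  # -> 3.0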
normalized(Vector) returns a unit vector
def normalized(first):
    if isinstance(first,FreeCAD.Vector):
        l=length(first)
        return FreeCAD.Vector(first.x/l, first.y/l, first.z/l)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalized(self):\n try:\n m = abs(self)\n return self / m\n except ZeroDivisionError as e:\n raise Exception(\"Attempted to normalize a zero vector, return a unit vector at zero degrees\") from e\n # return Vector(1, 0)", "def normalized(self):\n len = self.length\n return Vector(self.x / len, self.y / len)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(self,vector):\n return vector / np.linalg.norm(vector)", "def normalize(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(self, vector):\n return vector / np.linalg.norm(vector)", "def _unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n assert(vector != [0,0])\n return vector / np.linalg.norm(vector)", "def getNormalizedVector(self):\n return self.scalarMultiplication(self.norm() ** -1.0)", "def unit_vector(vector):\n vector = np.array(vector)\n if np.linalg.norm(vector) <= 0.00010:\n normv = 1.0\n else:\n normv = np.linalg.norm(vector)\n return vector / normv", "def get_normalized_vector(vector):\n # WARN: Zero length may cause problems!\n vector_lenght = get_vector_length(vector)\n if vector_lenght != 0:\n return np.divide(vector, get_vector_length(vector))\n else:\n return [0, 0]", "def normalized(vec):\n l = norm(vec)\n if l != 0.0:\n return vec / l\n else:\n raise ArithmeticError('Zero vector can\\'t be normalized!')", "def normalize(self):\n self.vector /= np.linalg.norm(self.vector)", "def get_unit_vector(self, vector):\n return vector / la.norm(vector)", "def normalize(self):\n\n if not self.magnitude():\n return Vector(0, 0)\n\n l = 1 / self.magnitude()\n return self.scale(l)", "def normalize(self):\n return Vector(self.args + []) / self.magnitude()", "def test_normalize(self):\n\n a1 = vectors.Vector(4, 0, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(1, 0, 0))\n\n a1 = vectors.Vector(0, 4, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 1, 0))\n\n a1 = vectors.Vector(0, 0, 4)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 0, 1))", "def testNormalize(self):\n v1 = Vector.ones(4)\n n = v1.norm()\n assert n == 2\n assert v1.normalize() == [ 0.5, 0.5, 0.5, 0.5 ]", "def unit_vector(vector):\n return vector / max(np.linalg.norm(vector), 1e-10)", "def normalize(self):\n\t\tnorm = self.norm()\n\t\tif norm == 0:\n\t\t\traise ValueError(\"Can't normalize zero vector\")\n\t\treturn self / norm", "def unit_vector(vector):\n #print 'unit_vector'\n #print vector\n #print type(vector)\n #npvector = np.array(vector)\n return vector / np.linalg.norm(vector)", 
"def cal_unit_vec(vector):\n return vector / np.linalg.norm(vector)", "def _get_unit_vector(self, v):\n return v / np.linalg.norm(v)", "def normalize(self, vec):\n length = math.sqrt( vec[0,0]*vec[0,0] + vec[0,1]*vec[0,1] + vec[0,2]*vec[0,2] )\n vnorm = vec / length\n return vnorm", "def normalize_vector(vector):\n v = np.divide(vector, np.linalg.norm(vector))\n return np.round(v, decimals=4)", "def normalized(self):\n length = self.length\n if length != 0:\n return self/length\n return Vec2d(self)", "def unit_vector(vector):\n if not np.all((vector == 0)):\n return vector / np.linalg.norm(vector)\n else:\n return vector", "def normalize_vector (vector ):\r\n\r\n if (np.sum (vector ) == 0):\r\n #print (\"In normalize_vector: Vector is 0. Returning input vector.\")\r\n return vector\r\n\r\n return vector / np.linalg.norm(vector)", "def normalized(self):\n return self.from_points(\n vector.normalized() for vector in self._vectors)", "def vec_unit( vec ):\r\n return np.divide( vec , np.linalg.norm( vec ) )", "def normalize(v):\n return v / np.linalg.norm(v)", "def normalized(v):\n norm = np.linalg.norm(v)\n if norm:\n return np.array(v) / norm\n else:\n return v", "def normalize(vec):\n return vec / length(vec)", "def normalize(vec):\n min_ = np.min(vec)\n max_ = np.max(vec)\n if min_ != max_:\n n_vec = (vec-min_)/(max_-min_)\n return n_vec\n\n return vec", "def unit_vector(vector):\n unit_vector = np.zeros((len(vector), vector.shape[1]))\n norm = np.linalg.norm(vector, axis=1)\n ndim = vector.ndim\n\n if ndim == 1: # Handling of 1-dimensional array\n unit_vector = vector / norm\n elif ndim == 2: # Handling of 2-dimensional array\n for i in range(0, vector.shape[1]):\n unit_vector[:, i] = vector[:, i] / norm\n else:\n log.fatal(f\"Dimension of vector should be either 1- or 2-dimensional and not {ndim}-dimensional.\")\n\n return unit_vector", "def getNormalVector(self):\n vector = self.unit_vector\n vector.rotate(math.pi / 2)\n return vector", "def unit_vector(vector):\n return 0 if vector[0] == 0 else vector[0]/abs(vector[0]), 0 if vector[1] == 0 else vector[1]/abs(vector[1])", "def normalize(v):\n return np.array(v) / np.linalg.norm(v)", "def normalized(self):\n return self / self.norm()", "def normalizeVector(v):\n normalizer = 1.0 / sum(v)\n\n normalized = [i * normalizer for i in v]\n return normalized", "def vector_normalize(vec, axis=None):\n mag = vector_magnitude(vec, axis=axis)\n mag = mag if mag > 0 else 1\n if axis is None:\n return vec / mag\n else:\n axis_ix = [None] * len(vec.shape)\n axis_ix[axis] = slice(None, None, None)\n return vec / numpy.array([mag])[axis_ix]", "def unit_vec(v):\n vlen = np.linalg.norm(v)\n if np.isclose(vlen, 0):\n raise ValueError('Cannot make unit vector from zero vector.')\n else:\n return v / vlen", "def normalize(v):\n\n return v * (1.0 / magnitude(v))", "def test_normalize_zero_length_vector(self):\n\n v = Vector({ 'x': 0 })\n v.normalize()\n self.assertEqual({ 'x': 0 }, v.dimensions)", "def normalized(self):\n L = self.length\n if L > pygonal.EPSILON:\n v = tuple.__new__(Vec2, (self[0] / L, self[1] / L))\n v.__dict__['length'] = v.__dict__['length2'] = 1.0\n return v\n else:\n return null", "def test_normalize_vector_space(self):\n\n v = Vector({ 'x': 10 })\n self.assertEqual(VectorSpace, type(v.dimensions))\n v.normalize()\n self.assertEqual(VectorSpace, type(v.dimensions))", "def normalize(self): # Function is fucked TODO\n l = self.length()\n for i in range(0, len(self.coords)):\n self.coords[i] /= l\n return self\n # return Vector(list([0 for 
i in range(len(v.coords))]))\n\n # if round(self.length() == 0):\n # s = 1 / self.length()\n # return self * s\n # else:\n # return Vector(list([0 for i in range(len(v.coords))]))", "def normalize(my_vector):\n my_vector = np.array(my_vector)\n size = len(my_vector)\n\n sum_ = sum(my_vector)\n if sum_ != 0.0:\n for i in range(size):\n my_vector[i] = my_vector[i] / sum_\n return my_vector", "def normalized(self):\n d = self.magnitude()\n if d:\n return type(self)(self.x / d, self.y / d)\n return self.copy()", "def as_unit(self):\n new_vec = self.copy()\n new_vec.normalize()\n return new_vec", "def Normal(self):\n return Vector(self.normal)", "def norm_vec(vec):\n return vec / norm(vec)", "def magni(vector):\n return(np.linalg.norm(vector))", "def normalize(x):\r\n return x/norm(x)", "def normalized(self):\n d = self.magnitude()\n if d:\n return type(self)(self.x / d, self.y / d, self.z / d)\n return self.copy()", "def to_unit(self):\n if self.is_zero():\n return Vector(0,0,0)\n else:\n magnitude = self.l2_norm()\n return Vector(self.x/magnitude, self.y/magnitude, self.z/magnitude)", "def vector_normalize(x):\n mag = math.sqrt(vector_dot(x, x))\n return [float(i) / mag for i in x]", "def test_magnitude_normalize(self):\n\n a1 = vectors.Vector(1, 2, 3)\n self.assertEqual(a1.normalize().magnitude(), 1)", "def normalise(vec):\n try:\n return vec / length(vec)\n except ZeroDivisionError:\n return vec", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def normal(self) -> Vec:\n return abs(self.up_axis.cross(self.forward()))", "def vecnorm(X) :\n\tXtemp = X - np.min(X)\n\tXnorm = Xtemp * 2 / np.max(Xtemp) - 1\n\treturn Xnorm", "def __abs__(self):\n return Vector.createFromPoint(self).norm", "def normal(self) -> Vector:\n return self._normal", "def norm(vec):\n return np.linalg.norm(vec)", "def unit(vector):\r\n result = [[0] for row in range(len(vector))]\r\n # creates the initial value for result of this function, which is a vector full of 0s with the same lenght of a given vector \r\n for z in range(len(vector)):\r\n # for loop which continues as long as there are more elements in the vector \r\n result[z] = vector[z]/norm(vector)\r\n # the new result being each element in the list being divided by the norm \r\n return result", "def normal(self) -> Vector:\n return normalize(cross(self.d1, self.d2))", "def norm(vec):\n vel = numpy.sqrt(numpy.dot(vec,vec))\n return vel", "def vec_normal(vec):\r\n n = sqrt(sum(x ** 2 for x in vec)) or 1\r\n return [x / n for x in vec]", "def normalize(self):\n self._vectors = [vector.normalized() for vector in self._vectors]", "def unit(vector: np.array) -> np.array:\n return np.array([*vector]) / np.sqrt((vector * vector).sum(axis=0))", "def norm_min(vector):\n return la.norm(vector, -np.inf)", "def normalize2(vec,norm=1.0):\n vec = check_numpy_array(vec)\n vel = numpy.sqrt(numpy.dot(vec,vec))\n out = (vec/vel)*norm\n return out", "def test_double_normalization(self):\n\n v = Vector({\"x\": 3, \"y\": 1.2, \"z\": -2})\n v.normalize()\n w = v.copy()\n w.normalize()\n self.assertEqual(v.dimensions, w.dimensions)", "def norm(v: Vec2) -> Vec2:\n t = length(v)\n return Vec2(v.x / t, v.y / t)", "def normal(self, u, v):\n result = np.cross(self.du(u, v), self.dv(u, v))\n result = result / np.sqrt(vectordot(result, result))[:, None]\n return result", "def normalize(inp):\n\n out = inp / np.linalg.norm(inp, axis=1, keepdims=True)\n\n return out", "def norm(self) -> \"Vector\":\n self.values = tuple(self/self.mag())\n return self", "def normal_vector(origin, vectors):\n 
return np.cross(vectors[0] - origin, vectors[1] - origin)", "def normalise1D(*vector):\n\n vector = np.array(vector).flatten() # 1D vector\n\n norm = np.linalg.norm(vector) # vector norm\n if norm == 0: return vector # vector is 0\n return vector/norm", "def normalize(self):\r\n\r\n nlen = 1.0/math.sqrt(self*self)\r\n return vec4(self.x*nlen, self.y*nlen, self.z*nlen, self.w*nlen)", "def normalized(self):\n v = self.copy()\n v.normalize()\n return v", "def normalize(v):\n det = math.sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2])\n return [v[0] / det, v[1] / det, v[2] / det]", "def normalize(self, bow):\n vector = matutils.unitvec(bow, self.norm)\n return vector", "def normalize(vectors):\n if len(np.asarray(vectors).shape) == 1:\n return vectors / np.linalg.norm(vectors)\n norm = np.linalg.norm(vectors, axis=1)\n return vectors / norm[:, np.newaxis]", "def nuclearnorm(X):\r\n if X.size == 0:\r\n return 0\r\n return LA.norm(X) if is_vector(X) else LA.norm(X, 'nuc')\r\n\r\n\r\n pass" ]
[ "0.84013855", "0.8248628", "0.8215351", "0.81663924", "0.81663924", "0.81663924", "0.8158663", "0.8142691", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8125412", "0.81115973", "0.8076635", "0.8054125", "0.8038757", "0.79553455", "0.7932586", "0.7875816", "0.7865435", "0.7865189", "0.7843627", "0.783858", "0.782851", "0.7808285", "0.7795583", "0.77954584", "0.7793595", "0.7787559", "0.77741826", "0.7761134", "0.77385694", "0.7686963", "0.76817846", "0.7663472", "0.76617104", "0.7636059", "0.7631071", "0.7578338", "0.75491446", "0.7516696", "0.7455275", "0.74504685", "0.7445472", "0.74431026", "0.7408363", "0.7403175", "0.73519737", "0.7305659", "0.7281611", "0.7258454", "0.72565794", "0.7248715", "0.7245907", "0.7240829", "0.72296816", "0.72271895", "0.72114134", "0.7207777", "0.71868527", "0.7153389", "0.7130242", "0.71274346", "0.71200883", "0.7083824", "0.70757544", "0.70747805", "0.70619863", "0.70558906", "0.7039261", "0.7024052", "0.7012376", "0.70056605", "0.7003523", "0.6992083", "0.6975742", "0.69726074", "0.6970152", "0.69501054", "0.6943827", "0.6937409", "0.6927041", "0.6904576", "0.68829286", "0.6870701", "0.68687934", "0.68576425", "0.6840764", "0.6833307", "0.6827038", "0.6815622", "0.68148494" ]
0.7754226
39
dotproduct(Vector,Vector) returns the dot product of both vectors
def dotproduct(first, other):
    if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):
        return (first.x*other.x + first.y*other.y + first.z*other.z)
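A minimal usage sketch of the formula above, with a plain namedtuple standing in for FreeCAD.Vector (an assumed stand-in for illustration only, not FreeCAD's actual Vector class):

from collections import namedtuple

Vec = namedtuple("Vec", "x y z")  # hypothetical stand-in for FreeCAD.Vector, illustration only

def dot(a, b):
    # same component-wise sum as dotproduct above: x*x + y*y + z*z
    return a.x * b.x + a.y * b.y + a.z * b.z

print(dot(Vec(1, 0, 0), Vec(0, 1, 0)))  # 0 -> perpendicular vectors
print(dot(Vec(1, 2, 3), Vec(4, 5, 6)))  # 32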
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dotproduct(vec1, vec2):\n return sum((a*b) for a, b in zip(vec1, vec2))", "def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))", "def dot_product(v1, v2):\n return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]", "def dotProduct(v1, v2):\n n1 = normalize(v1)\n n2 = normalize(v2)\n return n1[0] * n2[0] + n1[1] * n2[1] + n1[2] * n2[2]", "def dot_product(v1, v2):\n return v1[0] * v2[0] + v1[1] * v2[1]", "def vector_dot(v1,v2):\n return (v1.x * v2.x) + (v1.y * v2.y) + (v1.z * v2.z)", "def dot_product(vector1, vector2):\n out = None\n ### YOUR CODE HERE\n out=np.dot(vector1,vector2)\n ### END YOUR CODE\n\n return out", "def vec_dot(v1,v2):\r\n \r\n return np.dot(v1,v2)", "def dot(vector1, vector2):\n return sum(a1 * a2 for a1, a2 in zip(vector1, vector2))", "def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]", "def dotProduct(vectorA, vectorB):\r\n product =0\r\n for i in range(len(vectorA)):\r\n product += eval(vectorA[i])*eval(vectorB[i])\r\n return product", "def dotProduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))", "def dot_product(vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "def dotproduct(v1, v2):\n\treturn sum(imap(operator.mul, v1, v2))", "def vector_dot(x, y):\n\n if(len(x) != len(y)):\n raise ValueError(\"vector lengths differ\")\n else:\n # return x1*y1+x2*y2+...xn*yn\n return sum([x[i] * y[i] for i in range(len(x))])", "def dot_product(vec_1:tuple, vec_2:tuple)->float:\n return vec_1[0] * vec_2[0] + vec_1[1] * vec_2[1]", "def vec_dot(x, y):\r\n return sum(a * b for a, b in zip(x, y))", "def dot_product(first_vector, second_vector):\n first_unpacker = VectorUnpacker(first_vector)\n second_unpacker = VectorUnpacker(second_vector)\n if first_unpacker.unpacked_vector_length != second_unpacker.unpacked_vector_length:\n raise ApplicationError(\"Unpacked vector sizes are unequal\")\n\n # looks better than a 'map' one-liner to me\n value = 0\n for piece in zip(first_unpacker(), second_unpacker()):\n value += piece[0] * piece[1]\n\n return value", "def dot(vector01,vector02):\r\n result = 0\r\n # creates the initial value for the result of the dot product\r\n for z in range(len(vector01)):\r\n # for loop which continues as long as there are more values left in the vector \r\n result += vector01[z]*vector02[z]\r\n # the new result is found to be the corresponding values in each vector multiplied and then added together \r\n return result", "def dotproduct(vec1, vec2, sum=sum, map=map, mul=mul):\n return sum(map(mul, vec1, vec2))", "def dot_product(vector_1, vector_2):\n result = 0\n for idx_1, value_1 in vector_1.items():\n if idx_1 in vector_2:\n result += value_1 * vector_2[idx_1]\n return result", "def dot_product(a,b):\n return sum(pairwise_mult(a,b))", "def dot(vector_1: List, vector_2: List) -> float:\n if len(vector_1) != len(vector_2):\n raise InvalidInput(error_code_messages[\"InvalidLength\"])\n\n return sum(x * y for x, y in zip(vector_1, vector_2))", "def dot(a, b):\n\n if len(a) != len(b):\n raise Exception(\"Input vectors must be of same length, not %d and %d\" % (len(a), len(b)))\n\n return float(sum([a[i] * b[i] for i in range(len(a))]))", "def naive_vector_dot(x, y):\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert x.shape[0] == y.shape[0]\n\n z = 0\n for i in range(x.shape[0]):\n z += x[i] * y[i]\n return z", "def get_dot_product(v1,v2):\n #sets default dot product\n dot_product = 0\n \n for key in 
v2:\n if key in v1:\n # updates the dot product if key is present in both vectors\n dot_product += v1[key]*v2[key]\n #returns final dot product\n return dot_product", "def vdot(a, b):\n return np.vdot(a.ravel(), b.ravel())", "def dot(a, b):\n return np.vdot(a.arr,b.arr)", "def dot_vectors(u, v):\n return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]", "def dot_product(v1, v2):\n #print(v1, v2)\n sum = 0\n\n for i in range(len(v1)):\n #print(v1[i], v2[i])\n sum += v1[i] * v2[i]\n return sum", "def _dot(a, b):\n return np.einsum('ijk,ikl->ijl', a, b)", "def dot(a, b):\n return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]", "def dotproduct(x, y):\n return sum(imap(operator.mul, x, y))", "def dot_product(a, b):\n a1, a2, a3 = a\n b1, b2, b3 = b\n return a1 * b1 + a2 * b2 + a3 * b3", "def dot(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]", "def dot_product(v,w):\n return v[0] * w[0] + v[1] * w[1]", "def dot( v1, v2 ):\n return sum( x*y for x,y in izip(v1,v2) )", "def dot(p1, p2):\n return p1[0] * p2[0] + p1[1] * p2[1]", "def test_vector_dot_product(self):\n\n # Example 1.2\n vector_p = np.array([0.5, 0.0, 0.5])\n vector_q = np.array([0.5, 0.5, 0.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/16.0\n\n vector_d = vector_p - vector_q\n magnitude_nm = vector.dot_product(crystal, vector_d, vector_d)\n\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/4.0\n\n magnitude_nm = vector.dot_product(crystal, vector_p, vector_q)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n magnitude_nm = vector.dot_product(crystal, vector_q, vector_p)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n #self.fail(\"Test if the testcase is working.\")", "def dot(a, b):\n return sum([a[i]*b[i] for i in range(2)])", "def sparseVectorDotProduct(v1, v2):\n # BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this)\n return sum(v1[k]*v2[k] for k in v1 and v2)\n # END_YOUR_CODE", "def dot_product(u, v):\n ret = 0.0\n for i in range(len(u)):\n ret += float(float(u[i]) * float(v[i]))\n return ret", "def vecDot(a, b):\n ret=0.0\n for i in range(len(a)):\n ret+=a[i]*b[i]\n return ret", "def dot_product(a, b):\n dp = 0.0\n for i, j in zip(a, b):\n dp += i * j\n return dp", "def dot(a, b):\r\n a, b = as_tensor_variable(a), as_tensor_variable(b)\r\n\r\n if a.ndim == 0 or b.ndim == 0:\r\n return a * b\r\n elif a.ndim > 2 or b.ndim > 2:\r\n return tensordot(a, b, [[a.ndim - 1], [numpy.maximum(0, b.ndim - 2)]])\r\n else:\r\n return _dot(a, b)", "def vector_dot(xyz, vector):\n if len(vector) != 3:\n raise Exception(\n \"vector should be length 3, the provided length is {}\".format(\n len(vector)\n )\n )\n return vector[0]*xyz[:, 0] + vector[1]*xyz[:, 1] + vector[2]*xyz[:, 2]", "def dot_prod(u,v):\n each_product = []\n for i in range(len(u)):\n each_product.append(u[i] * v[i])\n return sum(each_product)", "def dot_product(A, B):\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n\n if (A_columns == B_rows) and (A_rows == 1 and B_columns == 1):\n\n dot_product = []\n \n dot_product.append(sum([A[0][i]*B[i][0] for i in range(A_columns)]))\n\n return float(dot_product)\n \n else:\n print(\"dimensions of vector do not match.\")", "def dot_prod(t1: torch.Tensor, t2: torch.Tensor, verbose: bool = False):\n assert t1.size() == t2.size(), \"Sizes for dot-product 
must match\"\n return mo.dot_prod(t1, t2, verbose)", "def vector_dot(v, w):\n return np.dot(v, w)", "def dot(self, vec):\n if not isinstance(vec, self.__class__):\n raise TypeError('Dot product operand must be a vector')\n return np.dot(self, vec)", "def dot(x,y):\n\treturn sum([xi*yi for (xi,yi) in zip(x,y)])", "def test_dot_different_sizes():\n dot(Vector(1.0), Vector(2.0, 3.0))", "def test_dot():\n assert_equal(dot(Vector(3.0, 2.0), Vector(2.0, -1.0)), 4.0)", "def dot_product(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(x, y):\n res = x[0] * y[0]\n for a, b in zip(x, y):\n res += a * b\n return res", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), \"vectors must be same length\"\n\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def _call_dot(vecObj, vec2):\n res = vecObj.dot(vec2)\n return res", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), 'vectors must be the same length'\n\n return sum(v_item * w_item for v_item, w_item in zip(v, w))", "def sparseVectorDotProduct(a, b):\n # BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this)\n ref = a if len(a)>len(b) else b\n return sum(a[key]*b[key] for key in ref)\n # END_YOUR_CODE", "def naive_matrix_vector_dot(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 1\n assert x.shape[1] == y.shape[0]\n\n z = np.zeros(x.shape[0])\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i] += x[i, j] * y[j]\n return z", "def dot(self, vec):\n pass", "def dotProduct(self, v):\n return self.x * v.x + self.y * v.y + self.z * v.z", "def sparseVectorDotProduct(v1, v2):\n ans = 0\n for index, val in v1.items():\n ans += val * v2[index]\n return ans", "def dot_product(u, v):\n sum_of_products = 0\n if u!= None:\n if v!= None:\n for combo in zip(u, v):\n sum_of_products += (combo[0] * combo[1])\n return sum_of_products", "def dot_product(A, B):\n # Section 1: Ensure A and B dimensions are the same\n rowsA = len(A); colsA = len(A[0])\n rowsB = len(B); colsB = len(B[0])\n if rowsA != rowsB or colsA != colsB:\n raise ArithmeticError('Matrices are NOT the same size.')\n\n # Section 2: Sum the products \n total = 0\n for i in range(rowsA):\n for j in range(colsB):\n total += A[i][j] * B[i][j]\n\n return total", "def dot(self, vec):\n if not isinstance(vec, self.__class__):\n raise TypeError('Dot product operand must be a VectorArray')\n if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:\n raise ValueError('Dot product operands must have the same '\n 'number of elements.')\n return np.sum((getattr(self, d)*getattr(vec, d) for d in self.dims), 1)", "def dot_prod(v1, v2):\n dp = 0\n j = 0\n for i in range(len(v1)):\n while j<len(v2) and v1[i][0]>v2[j][0]:\n j = j + 1\n p = 0 if j>=len(v2) or v2[j][0]>v1[i][0] else v2[j][1]*v1[i][1]\n dp = dp + p\n return dp", "def dot(self, other):\n\n Vector = sympy.vector.Vector\n if isinstance(other, BasisDependentZero):\n return Vector.zero\n elif isinstance(other, Vector):\n outvec = Vector.zero\n for k, v in self.components.items():\n vect_dot = k.args[1].dot(other)\n outvec += vect_dot * v * k.args[0]\n return outvec\n elif isinstance(other, Dyadic):\n outdyad = Dyadic.zero\n for k1, v1 in self.components.items():\n for k2, v2 in other.components.items():\n vect_dot = k1.args[1].dot(k2.args[0])\n outer_product = k1.args[0].outer(k2.args[1])\n outdyad += vect_dot * v1 * v2 * outer_product\n return outdyad\n else:\n raise TypeError(\"Inner product is not defined for \" +\n str(type(other)) + \" and 
Dyadics.\")", "def dot4(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]", "def sparseVectorDotProduct(v1, v2):\n # BEGIN_YOUR_ANSWER (our solution is 3 lines of code, but don't worry if you deviate from this)\n return sum([v1[v1_key] * v2.get(v1_key, 0) for v1_key in v1])\n # END_YOUR_ANSWER", "def dot_product(a, b):\n if len(a) != len(b):\n raise ValueError('Arrays are of different lengths')\n else:\n return [a[i] * b[i] for i in range(len(a))]", "def dot(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n if len(vec2) != len(self):\n raise DifferentLengthVectors(self, vec2)\n\n return sum([self[i]*vec2[i] for i in range(len(self))])", "def vec_dot_star(v1,v2):\r\n \r\n dot_star = v1[0]*(v2[1])-v1[1]*v2[0]\r\n return dot_star", "def dot(v,w):\r\n return sum(v_i * w_i\r\n for v_i, w_i in zip(v, w))", "def matrix_dot_product(A, B):\r\n assert A.shape == B.shape\r\n return np.einsum('ij,ij->i', A, B)", "def pairwise_dot_product_similarity(x, y):\n return torch.mm(x, torch.transpose(y, 1, 0))", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def lpDot(v1, v2):\n\tif not isinstance(v1, list) and not isinstance(v2, list):\n\t\treturn v1 * v2\n\telif not isinstance(v1, list):\n\t\treturn lpDot([v1]*len(v2),v2)\n\telif not isinstance(v2, list):\n\t\treturn lpDot(v1,[v2]*len(v1))\n\telse:\n\t\treturn lpSum([lpDot(e1,e2) for e1,e2 in zip(v1,v2)])", "def dot(v,w):\n return sum(v_i * w_i for v_i,w_i in zip(v,w))", "def _listdot(d1, d2):\n return [np.dot(x[0].T, x[1]) for x in zip(d1, d2)]", "def dot_product_scores(q_vectors: T, ctx_vectors: T) -> T:\n # q_vector: n1 x D, ctx_vectors: n2 x D, result n1 x n2\n r = torch.matmul(q_vectors, torch.transpose(ctx_vectors, 0, 1))\n return r", "def dot(v,w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v,w))", "def sparse_dot_product(first_vector, second_vector):\n # Initialize a dictionary\n product_vector = {}\n # Calculate the dot product of the two vectors\n for key_1, value_1 in first_vector.items():\n for key_2, value_2 in second_vector.items():\n if key_1 == key_2 and key_1 not in product_vector:\n product_vector[key_1] = value_1 * value_2\n elif key_1 == key_2 and key_1 in product_vector:\n product_vector[key_1] = value_1 * value_2\n elif key_1 not in product_vector:\n product_vector[key_1] = 0\n elif key_2 not in product_vector:\n product_vector[key_2] = 0\n # Initialize a variable\n result = 0\n # Calculate the sum of each product\n for product_key in list(product_vector.keys()):\n if product_key != 'length':\n result += product_vector[product_key]\n return result", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v, w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v, w))", "def dot(v, w):\n\treturn sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(xs: List[float], ys: List[float]) -> float:\n return sum(x * y for x, y in zip(xs, ys))", "def _mulVectors(X1,X2):\n _checkSize(X1,X2)\n return sum([ X1[i] * X2[i] for i in range(len(X1))])", "def dot(self, that):\n\t\tif(self.d != that.d):\n\t\t\traise ValueError(\"Vector lengths disagree\")\n\t\tsum = 0.0\n\n\t\t#iterate over the vector with the fewest nonzeroes\n\t\tif(self.st.size() <= that.st.size()):\n\t\t\tfor i in 
self.st.keys():\n\t\t\t\tif(that.st.contains(i)):\n\t\t\t\t\tsum += self.get(i) * that.get(i)\n\t\telse:\n\t\t\tfor i in that.st.keys():\n\t\t\t\tif(self.st.contains(i)):\n\t\t\t\t\tsum += self.get(i) * that.get(i)\n\t\treturn sum", "def dot(a, b):\n if issparse(a) or issparse(b):\n return dot_sparse(a, b)\n try:\n return a.dot(b)\n except AttributeError:\n return a @ b", "def test_dot(self):\n\n vec1 = Vec3(3, 4, 5)\n vec2 = Vec3(2, 3, 4)\n dot = vec1.dot(vec2)\n\n expected = 3 * 2 + 4 * 3 + 5 * 4\n\n self.assertEqual(dot, expected)", "def dot(left, right):\n if sp.sparse.issparse(right) or sp.sparse.issparse(left):\n result = left*right\n else:\n result = left.dot(right)\n return result", "def dot(v, w):\n l = list(zip(v, w))\n return sum(v_i * w_i for v_i, w_i in l)", "def dot(self,v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def vdot(x, v, pub):\n x_flatten = x.flatten()\n v_flatten = v.flatten()\n mul_res = paillier_gpu.mul_impl(v_flatten, x_flatten)\n\n return paillier_gpu.sum_impl(mul_res)" ]
[ "0.86972415", "0.8648256", "0.85929865", "0.85891086", "0.8570757", "0.8549981", "0.8507858", "0.847935", "0.84727925", "0.84720486", "0.84266394", "0.8419811", "0.8343178", "0.8338558", "0.83323467", "0.8256427", "0.82338524", "0.82235616", "0.82016593", "0.8183103", "0.8173647", "0.81126434", "0.80399406", "0.7994739", "0.79797643", "0.7923492", "0.79177207", "0.7913951", "0.7892828", "0.785035", "0.78304124", "0.7828941", "0.77942264", "0.7745791", "0.7718438", "0.76782495", "0.76743007", "0.76616216", "0.7658423", "0.76106536", "0.7595523", "0.7578377", "0.7566033", "0.7552591", "0.75493085", "0.7545872", "0.7522997", "0.75178605", "0.75022036", "0.7498161", "0.74800766", "0.7467332", "0.74625564", "0.74440986", "0.74393547", "0.7413244", "0.7373598", "0.73654383", "0.73584443", "0.7356388", "0.7355144", "0.73522395", "0.7333979", "0.72922146", "0.7289296", "0.72836775", "0.7259825", "0.71773803", "0.7168049", "0.7120212", "0.7116954", "0.71048486", "0.7099786", "0.7091481", "0.70590156", "0.7058447", "0.7056831", "0.7055843", "0.703552", "0.7011443", "0.70054305", "0.69956446", "0.6992639", "0.6952313", "0.6951439", "0.6951439", "0.6951439", "0.6951439", "0.6951439", "0.694081", "0.69202995", "0.69050896", "0.69043887", "0.68822414", "0.68622196", "0.68427384", "0.6806974", "0.6801401", "0.68005246", "0.6794267" ]
0.80322015
23
crossproduct(Vector,Vector) returns the cross product of both vectors. If only one is specified, cross product is made with vertical axis, thus returning its perpendicular in XY plane
def crossproduct(first, other=FreeCAD.Vector(0,0,1)):
    if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):
        return FreeCAD.Vector(first.y*other.z - first.z*other.y, first.z*other.x - first.x*other.z, first.x*other.y - first.y*other.x)
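A minimal sketch of the same determinant-style formula on plain 3-tuples (an illustrative stand-in, not FreeCAD's API); the one-argument call mirrors the default vertical axis and so returns the perpendicular in the XY plane:

def cross(a, b=(0.0, 0.0, 1.0)):
    # (ay*bz - az*by, az*bx - ax*bz, ax*by - ay*bx), as in crossproduct above
    ax, ay, az = a
    bx, by, bz = b
    return (ay * bz - az * by, az * bx - ax * bz, ax * by - ay * bx)

print(cross((1, 0, 0), (0, 1, 0)))  # (0, 0, 1)
print(cross((1, 2, 0)))             # (2.0, -1.0, 0.0): perpendicular to (1, 2, 0) in the XY plane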
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_product(v1, v2):\n return cg3d_vector.CG3dVector(\n v1[1] * v2[2] - v2[1] * v1[2],\n v1[2] * v2[0] - v2[2] * v1[0],\n v1[0] * v2[1] - v2[0] * v1[1]\n )", "def vector_cross(x, y):\n\n if(len(x) != len(y)):\n raise ValueError(\"vector lengths differ\")\n elif(len(x) > 3):\n raise ValueError(\"vector is more than 3D\")\n else:\n s = [x[1] * y[2] - x[2] * y[1],\n x[2] * y[0] - x[0] * y[2],\n x[0] * y[1] - x[1] * y[0]]\n return s", "def vec_cross(a,b):\r\n return [a[1] * b[2] - a[2] * b[1],\r\n a[2] * b[0] - a[0] * b[2],\r\n a[0] * b[1] - a[1] * b[0]]", "def cross(a, b):\n #return np.cross(a,b)\n\n return vector(a[1] * b[2] - a[2] * b[1],\n a[2] * b[0] - a[0] * b[2],\n a[0] * b[1] - a[1] * b[0])", "def cross(v1: Vector, v2: Vector) -> Vector: # Function is fucked TODO\n if len(v1.coords) != 3 or len(v2.coords) != 3:\n raise ValueError(\"Vectors have to be 3 fucking D, nøøb\")\n x = v1.y * v2.z - v1.z * v2.y\n y = v1.z * v2.x - v1.x * v2.z\n z = v1.x * v2.y - v1.y * v2.x\n return Vector(x, y, z)", "def cross(v1, v2):\n return np.cross(v1, v2)", "def test_cross():\n assert_equal(cross(Vector(1, 0, 0), Vector(0, 1, 0)), Vector(0, 0, 1))\n assert_equal(cross(Vector(1, 3, 2), Vector(-1, 1, 0)), Vector(-2, -2, 4))", "def cross(vec1, vec2):\n result = np.zeros(3)\n return cross_(vec1, vec2, result)", "def cross(v1: Vec2, v2: Vec2) -> float:\n return v1.x * v2.x + v1.y * v2.y", "def cross_product(p0,p1,p2):\n\treturn (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))", "def cross_(vec1, vec2, result):\n a1, a2, a3 = double(vec1[0]), double(vec1[1]), double(vec1[2])\n b1, b2, b3 = double(vec2[0]), double(vec2[1]), double(vec2[2])\n result[0] = a2 * b3 - a3 * b2\n result[1] = a3 * b1 - a1 * b3\n result[2] = a1 * b2 - a2 * b1\n return result", "def cross_product(a, b):\n return (a[1]*b[2] - a[2]*b[0],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0])", "def crossProduct(self, p1, p2):\n return (p1.x * p2.y - p1.y * p2.x)", "def cross_product(a,b):\n return [a[1]*b[2]-a[2]*b[1], a[2]*b[0]-a[0]*b[2], a[0]*b[1]-a[1]*b[0]]", "def cross(self, v):\n if (len(self.mV) != 3) or (len(v) != 3):\n raise IndexError('Cross product is only for 2 3-vectors.')\n\n (x1, y1, z1) = (self.mV[0], self.mV[1], self.mV[2])\n (x2, y2, z2) = (v[0], v[1], v[2])\n x = y1 * z2 - y2 * z1\n y = z1 * x2 - z2 * x1\n z = x1 * y2 - x2 * y1\n return Vector(x, y, z)", "def crossProduct(p1, p2, p3):\n return (\n -(p1[1]*p2[0]) + p1[0]*p2[1] +\n p1[1]*p3[0] - p2[1]*p3[0] -\n p1[0]*p3[1] + p2[0]*p3[1]\n )", "def testCross(self):\n v1 = Vector(1, 0, 0)\n v2 = Vector(0, 1, 0)\n assert v1.cross(v2) == [0, 0, 1]\n assert v1.cross([0, 1, 0]) == Vector(0, 0, 1)\n\n v3 = Vector(-1, 0, 0)\n assert v2.cross(v3) == [0, 0, 1]\n\n assert Vector(0, 0, 1).cross(Vector(1, 0, 0)) == Vector(0, 1, 0)\n c = 0.707106781 # Cos 45\n assert Vector(0, 0, 3).cross(Vector(2*c, 0, 2*c)) == Vector(\n 0, 6*c, 0)\n\n c = 0.5 # cos 60deg\n s = 0.866025404 # sin 60deg\n assert Vector(0, 0, 3).cross(Vector(s, 0, c)) == Vector(0, 3*s, 0)\n assert Vector(0, 0, 3).cross([s, 0, c]) == [0, 3*s, 0]\n\n hitException = False\n try:\n v1 = Vector(1, 2, 3, 4)\n v2 = Vector(5, 6, 7, 8)\n v3 = v1.cross(v2)\n except IndexError:\n hitException = True\n assert hitException", "def cross(a, b):\n return np.array([a[1]*b[2] - a[2]*b[1],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0]])", "def cross_vectors(u, v):\n return [u[1] * v[2] - u[2] * v[1],\n u[2] * v[0] - u[0] * v[2],\n u[0] * v[1] - u[1] * v[0]]", "def cross(self, vector):\n\n return Vector((self.y * vector.z 
- self.z * vector.y),\n (self.z * vector.x - self.x * vector.z),\n (self.x * vector.y - self.y * vector.x))", "def cross_product(a, b):\n a1, a2, a3 = a\n b1, b2, b3 = b\n return (a2 * b3 - a3 * b2, a3 * b1 - a1 * b3, a1 * b2 - a2 * b1)", "def cross(a,b):\n \n return [ a[1]*b[2] - a[2]*b[1],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0],\n 1.0 ]", "def cross(a, b):\n c1 = a[1]*b[2] - a[2]*b[1]\n c2 = a[2]*b[0] - a[0]*b[2]\n c3 = a[0]*b[1] - a[1]*b[0]\n return sp.array([c1,c2,c3])", "def ucross(a, b):\n ev = a / np.linalg.norm(a)\n return np.cross(ev, b)", "def cross(self, vec):\n if not isinstance(vec, self.__class__):\n raise TypeError('Cross product operand must be a vector')\n return Vector3(0, 0, np.asscalar(np.cross(self, vec)))", "def cross_multiply(x):\n return (x[0][0] * x[1][1]) - (x[0][1] * x[1][0])", "def get_cross2d(v1, v2):\n return v1[0]*v2[1] - v1[1]*v2[0]", "def cross(self, vec):\n if not isinstance(vec, self.__class__):\n raise TypeError('Cross product operand must be a vector')\n return self.__class__(np.cross(self, vec))", "def cross(x, y):\n x = x.reshape(3)\n y = y.reshape(3)\n z = np.cross(x, y)\n z = z.reshape((3, 1))\n return z", "def vector_cross(v, w):\n res = np.cross(v, w)\n\n if len(v) == 3:\n return Vector(*res)\n else:\n return res", "def d_cross(a, b):\n d_cross = np.zeros((3, 3), dtype=float)\n for i in range(3):\n ei = np.zeros(3, dtype=float)\n ei[i] = 1.0\n d_cross[i] = np.cross(ei, b)\n return d_cross", "def cross(triangles):\n vectors = np.diff(triangles, axis=1)\n crosses = np.cross(vectors[:, 0], vectors[:, 1])\n return crosses", "def cross(u,v):\n u1, u2, u3 = u\n v1, v2, v3 = v\n return np.array([u2*v3 - u3*v2,\n u3*v1 - u1*v3,\n u1*v2 - u2*v1], dtype=u.dtype)", "def crossProduct(self, factor):\n components = np.cross(self.components(), factor.components())\n return Vector.initializeFromComponents(components)", "def cross(o, a, b):\r\n xo, yo = o\r\n xa, ya = a\r\n xb, yb = b\r\n return (xa - xo)*(yb - yo) - (ya - yo)*(xb - xo)", "def perpendicular_vector(v):\n if v[1] == 0 and v[2] == 0:\n if v[0] == 0:\n raise ValueError(\"zero vector\")\n else:\n return np.cross(v, [0, 1, 0])\n return np.cross(v, [1, 0, 0])", "def cross(self):\n return self.v.cross(self.z_axis)", "def cross_z(self):\n return self.v.cross(Vector((0, 0, 1)))", "def cross(a, b):\n # since the data can be n-dimensional, reshape\n # to a 2-d (3, N) array\n xyz_a, xyz_b = a.xyz, b.xyz\n orig_shape_a = xyz_a.shape\n orig_shape_b = xyz_b.shape\n xyz_a = xyz_a.reshape((3, xyz_a.size // 3))\n xyz_b = xyz_b.reshape((3, xyz_b.size // 3))\n\n # take the cross product\n cross_product = np.cross(xyz_a[:, :, np.newaxis], xyz_b,\n axisa=0, axisb=0, axisc=0)\n cross_product_unit = xyz_a.unit * xyz_b.unit\n cross_product = u.Quantity(cross_product, unit=cross_product_unit)\n\n cartrep = CartesianRepresentation(cross_product)\n return cartrep.reshape(orig_shape_a[1:] + orig_shape_b[1:])", "def normalized_cross(a, b):\n c = np.cross(a, b)\n length = sqrt(c.dot(c))\n return c/length if length > 0 else c", "def cross(self, vec):\n if not isinstance(vec, Vector3Array):\n raise TypeError('Cross product operand must be a Vector3Array')\n if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:\n raise ValueError('Cross product operands must have the same '\n 'number of elements.')\n return Vector3Array(np.cross(self, vec))", "def ncross2(u, v):\n return sq2(u) * sq2(v) - dot2(u, v) ** 2", "def xCrossProd(self, other):\n return other.y * self.z - other.z * self.y", "def test_cross_v3(self):\n\n vec1 = 
Vec3(1, 0, 0)\n vec2 = Vec3(0, 1, 0)\n cross = vec1.cross(vec2)\n\n expected = Vec3(0, 0, 1)\n\n self.assertEqual(cross, expected)", "def cross(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n if (len(self) or len(vec2)) != 3:\n raise Exception(\"Incorrect vector lengths. Must be two 3 length vectors.\")\n\n return Vector(self[1]*vec2[2] - self[2]*vec2[1],\n self[2]*vec2[0] - self[0]*vec2[2],\n self[0]*vec2[1] - self[1]*vec2[0])", "def cross(self, other):\n\n Vector = sympy.vector.Vector\n if other == Vector.zero:\n return Dyadic.zero\n elif isinstance(other, Vector):\n outdyad = Dyadic.zero\n for k, v in self.components.items():\n cross_product = k.args[1].cross(other)\n outer = k.args[0].outer(cross_product)\n outdyad += v * outer\n return outdyad\n else:\n raise TypeError(str(type(other)) + \" not supported for \" +\n \"cross with dyadics\")", "def vector_dot(v1,v2):\n return (v1.x * v2.x) + (v1.y * v2.y) + (v1.z * v2.z)", "def crossProduct( set1, set2):\n set1 = asarray( set1, _aformat(set1))\n set1 = reshape( set1, (-1, 3))\n set2 = asarray( set2, _aformat(set2))\n set2 = reshape( set2, (-1, 3))\n return cross( set1, set2 )", "def cross(p, q):\n xyz = np.zeros(3)\n xyz[0] = p[1] * q[2] - p[2] * q[1]\n xyz[1] = p[2] * q[0] - p[0] * q[2]\n xyz[2] = p[0] * q[1] - p[1] * q[0]\n return xyz", "def cross(self, other):\n if isinstance(other, float):\n return Vector(other*self.y, -other*self.x)\n\n if isinstance(other, Vector):\n return self.x*other.y - self.y*other.x", "def test_perpendicular_to_vectors():\n random_state = np.random.RandomState(0)\n a = pr.norm_vector(pr.random_vector(random_state))\n a1 = pr.norm_vector(pr.random_vector(random_state))\n b = pr.norm_vector(pr.perpendicular_to_vectors(a, a1))\n c = pr.norm_vector(pr.perpendicular_to_vectors(a, b))\n assert_almost_equal(pr.angle_between_vectors(a, b), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(a, c), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(b, c), np.pi / 2.0)\n assert_array_almost_equal(pr.perpendicular_to_vectors(b, c), a)\n assert_array_almost_equal(pr.perpendicular_to_vectors(c, a), b)", "def cross2(u, v, w):\n return dot2(u, w) * v - dot2(u, v) * w", "def spm_cross(X, x=None, *args):\n\n if len(args) == 0 and x is None:\n if X.dtype == \"object\":\n Y = spm_cross(*list(X))\n\n elif np.issubdtype(X.dtype, np.number):\n Y = X\n\n return Y\n\n if X.dtype == \"object\":\n X = spm_cross(*list(X))\n\n if x is not None and x.dtype == \"object\":\n x = spm_cross(*list(x))\n\n reshape_dims = tuple(list(X.shape) + list(np.ones(x.ndim, dtype=int)))\n A = X.reshape(reshape_dims)\n\n reshape_dims = tuple(list(np.ones(X.ndim, dtype=int)) + list(x.shape))\n B = x.reshape(reshape_dims)\n\n Y = np.squeeze(A * B)\n\n for x in args:\n Y = spm_cross(Y, x)\n\n return Y", "def cross_z(self):\n return Vector((self.v.y, -self.v.x))", "def liner_cross_point(a1, b1, c1, a2, b2, c2):\n if a1 == 0 or a2 == 0:\n if a2 == 0:\n a1, b1, c1, a2, b2, c2 = a2, b2, c2, a1, b1, c1\n y = - c1 / b1\n x = - (b2 * y + c2) / a2\n elif b1 == 0 or b2 == 0:\n if b2 == 0:\n a1, b1, c1, a2, b2, c2 = a2, b2, c2, a1, b1, c1\n x = - c1 / a1\n y = - (a2 * x + c2) / b2\n else:\n a1, b1, c1 = a1 / b1, b1 / b1, c1 / b1\n a2, b2, c2 = a2 / b2, b2 / b2, c2 / b2\n x = - (c1 - c2) / (a1 - a2)\n y = - a1 * x - c1\n return x, y", "def cross(self, other):\n \n return self.x * other[1] - self.y * other[0]", "def cross(self, other):\n if self.x == other.x:\n if self.x == 0:\n return other\n else:\n cross = 
getcopy(self)\n for row in other.a:\n cross.newrow(row)\n cross.newrow([self.prepare(1.0)]*cross.x)\n out = cross.new(1)\n for x in xrange(0, out.x):\n out.store(0,x, cross.minor(cross.y-1, x).det())\n return out\n else:\n raise IndexError(\"Matrix cross product invalid for dimensions \"+str(self.y)+\"x\"+str(self.x)+\" and \"+str(other.y)+\"x\"+str(other.x))", "def cross(self, other):\n return self.x * other.y - self.y * other.x", "def cross(self, other: PointOrIterable) -> float:\n try:\n return (self.x * other.y) + (self.y * other.x)\n except AttributeError:\n pass\n return (self.x * other[1]) + (self.y * other[0])", "def outer_product(u: Vector3D, v: Vector3D):\n cx = u.y * v.z - u.z * v.y\n cy = u.z * v.x - u.x * v.z\n cz = u.x * v.y - u.y * v.x\n return Vector3D(cx, cy, cz, coordinate_system='cartesian')", "def cross(self, other):\n\t\treturn Vector3(\n\t\t\tself.y * other.z - self.z * other.y,\n\t\t\tself.z * other.x - self.x * other.z,\n\t\t\tself.x * other.y - self.y * other.x,\n\t\t)", "def crossProduct4( set1, set2 ):\n set1 = asarray( set1, _aformat(set1))\n set1 = reshape( set1, (-1, 4))\n set2 = asarray( set2, _aformat(set1))\n set2 = reshape( set2, (-1, 4))\n result = zeros( (len(set1),4), _aformat(set1))\n result[:,:3] = cross( set1[:,:3],set2[:,:3])\n result[:,3] = 1.0\n return result", "def vec_dot(x, y):\r\n return sum(a * b for a, b in zip(x, y))", "def cross(self, other):\n return self.x*other[1] - self.y*other[0]", "def cross_pts_triangle(p1, p2, p3):\n return (p1[:, 0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[:, 1] - p3[1])", "def vector_dot(xyz, vector):\n if len(vector) != 3:\n raise Exception(\n \"vector should be length 3, the provided length is {}\".format(\n len(vector)\n )\n )\n return vector[0]*xyz[:, 0] + vector[1]*xyz[:, 1] + vector[2]*xyz[:, 2]", "def vector_dot(x, y):\n\n if(len(x) != len(y)):\n raise ValueError(\"vector lengths differ\")\n else:\n # return x1*y1+x2*y2+...xn*yn\n return sum([x[i] * y[i] for i in range(len(x))])", "def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]", "def normal_vector(origin, vectors):\n return np.cross(vectors[0] - origin, vectors[1] - origin)", "def cross( self, vector3 ):\n product = cross( self.array, vector3.array )\n self._coords[:3] = matrix( product ).transpose()\n return self", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def cross3(self, left, right):\n return np.array([left[1] * right[2] - left[2] * right[1],\n left[2] * right[0] - left[0] * right[2],\n left[0] * right[1] - left[1] * right[0]])", "def cross(self, other):\n\n return self.x * other.y - self.y * other.x", "def perpendicularTo(self, vector):\n perpendicular = self.subtractVector(self.parallelTo(vector))\n return perpendicular", "def __xor__(self,v2):\n\t\treturn np.cross(self._vec,v2._vec)", "def crossform(a):\n return np.array([[0, -a[2], a[1]],\n [a[2], 0, -a[0]],\n [-a[1], a[0], 0]])", "def cross_matrix(v):\n\treturn np.array([\n\t\t[ 0, -v[2], v[1]],\n\t\t[ v[2], 0, -v[0]],\n\t\t[-v[1], v[0], 0]])", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def cross(self, other):\n ox, oy = other\n return self[0] * oy - self[1] * ox", "def calcCrossMag(v1,v2):\n # Calculate the magnitude of cross product of two vectors\n\n return(abs(np.linalg.norm(np.cross(v1,v2))))", "def same_side_product(p, q, a, b):\n return line_ccw(a, b, p) * line_ccw(a, b, q)", "def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)", "def dot_product(vector1, 
vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "def perpendicular(self):\n return tuple.__new__(Vec2, (-self[1], self[0]))", "def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))", "def test_cross_v3(self):\n\n from pedemath.vec3 import cross_v3\n\n cross = cross_v3(Vec3(0, -1, 0), Vec3(0, 0, -1))\n expected = Vec3(1, 0, 0)\n\n self.assertEqual(cross, expected)", "def vec_dot(v1,v2):\r\n \r\n return np.dot(v1,v2)", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def perpendicular_bisector(point_1, point_2):\r\n A = 2 * (point_2.x - point_1.x)\r\n B = 2 * (point_2.y - point_1.y)\r\n C = (point_1.y - point_2.y) * (point_1.y + point_2.y) + \\\r\n (point_1.x - point_2.x) * (point_1.x + point_2.x)\r\n return np.matrix([[A],[B],[C]])", "def normal_vector_3p(a: Vector, b: Vector, c: Vector) -> Vector:\n return (b - a).cross(c - a).normalize()", "def dotproduct(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return (first.x*other.x + first.y*other.y + first.z*other.z)", "def parallel(self, vector):\n\n # if self.cross(vector).magnitude() == 0 or self.cross(vector.negative()).magnitude() == 0:\n if abs(self.cross(vector).magnitude()) < 0.01 : # @todo @caution:: find a better way.\n return True\n return False", "def find_perpendicular_vector(vt):\n x, y = vt\n return np.array([y, -x])", "def dot_product(vector1, vector2):\n out = None\n ### YOUR CODE HERE\n out=np.dot(vector1,vector2)\n ### END YOUR CODE\n\n return out", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def pair_product(x1, x2):\n return np.multiply(x1, x2)", "def naive_vector_dot(x, y):\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert x.shape[0] == y.shape[0]\n\n z = 0\n for i in range(x.shape[0]):\n z += x[i] * y[i]\n return z", "def dotproduct(vec1, vec2):\n return sum((a*b) for a, b in zip(vec1, vec2))", "def cross_product(self, B:'Matrix') -> 'Matrix':\n assert self.shape() == B.shape(), f\"For cross product, shapes should be (1x3) - these are {self.shape()} and {B.shape()}.\"\n if self.shape()[1] == 1: # checking for a (3 x 1) matrix, in which case, we'll use the transposes.\n assert self.shape()[0] > 2 , f\"self must be at least 3 in one direction. This is {self.shape()}\"\n return self.transpose().cross(B.transpose())\n assert self.shape()[1] > 2, f\"self must be at least 3 in one direction. This is {self.shape()}\"\n\n return Matrix(((self.mat[0][1] * B.mat[0][2] - self.mat[0][2] * B.mat[0][1],\n self.mat[0][2] * B.mat[0][0] - self.mat[0][0] * B.mat[0][2],\n self.mat[0][0] * B.mat[0][1] - self.mat[0][1] * B.mat[0][0]),))", "def mirror_vector_vector(v1, v2):\n return subtract_vectors(v1, scale_vector(v2, 2 * dot_vectors(v1, v2)))" ]
[ "0.8131378", "0.802793", "0.79265344", "0.7779056", "0.77629125", "0.77045447", "0.76780075", "0.75904584", "0.7525282", "0.7513922", "0.7469388", "0.7426301", "0.7420362", "0.7362331", "0.73169047", "0.7263604", "0.72250664", "0.7215048", "0.72099155", "0.71860886", "0.71698695", "0.71268326", "0.71265066", "0.71000427", "0.70718366", "0.70548105", "0.7052511", "0.69804305", "0.69800276", "0.69546455", "0.68689525", "0.6830143", "0.67920715", "0.67797697", "0.67766804", "0.6766795", "0.6755934", "0.6691072", "0.6664645", "0.6644105", "0.6641514", "0.66366583", "0.6620486", "0.6605198", "0.65890974", "0.65784776", "0.65535855", "0.65417373", "0.6526282", "0.6483587", "0.6441277", "0.6423369", "0.6388669", "0.6365967", "0.63538635", "0.633384", "0.6316163", "0.62838614", "0.62748307", "0.6254499", "0.62427723", "0.62254375", "0.6219475", "0.6209882", "0.6188334", "0.61778915", "0.6175647", "0.61605334", "0.61474586", "0.61420906", "0.6138487", "0.61193687", "0.60991806", "0.60956013", "0.6087555", "0.6087481", "0.60721695", "0.6059368", "0.605012", "0.6038409", "0.6037681", "0.6024113", "0.6017049", "0.6016231", "0.6000017", "0.59988195", "0.599765", "0.5991825", "0.59900033", "0.5987154", "0.59678525", "0.59574604", "0.5954887", "0.59356797", "0.593072", "0.59173304", "0.5888381", "0.5881318", "0.5878737", "0.5866178" ]
0.72218657
17
angle(Vector,Vector) returns the angle in radians between the two vectors. If only one is given, angle is between the vector and the horizontal East direction
def angle(first, other=FreeCAD.Vector(1,0,0)):
    if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):
        return math.acos(dotproduct(normalized(first),normalized(other)))
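A minimal sketch of the same computation on plain tuples (illustration only, not FreeCAD's API); taking acos of the dot product of the two normalized vectors is equivalent to acos(dot / (|a|*|b|)):

import math

def angle(a, b=(1.0, 0.0, 0.0)):
    # equivalent to acos(dot(normalized(a), normalized(b)))
    dot = sum(x * y for x, y in zip(a, b))
    norms = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(x * x for x in b))
    return math.acos(dot / norms)

print(angle((0, 1, 0)))             # ~1.5708 rad: 90 deg from the default +X axis
print(angle((1, 1, 0), (1, 0, 0)))  # ~0.7854 rad: 45 deg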
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))", "def angle(vec1, vec2):\n\n return math.acos(dotproduct(vec1, vec2) / (length(vec1) * length(vec2)))", "def angle_between_vectors(vec1, vec2):\n vec = vec1 - vec2\n vec = vec.perpendicular()\n return vec.angle", "def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)", "def angle(v1, v2):\n return acos(np.clip(v1.dot(v2) / (length(v1) * length(v2)), -1.0, 1.0))", "def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n\n #takes out if vectors are 1 or -1 (basically if they're the same direction)\n angle = math.degrees(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))\n return angle", "def angleBetweenVectors(v1, v2):\n v2Size = vectorLength(v2)\n if not v2Size:\n theta = 0.0\n else:\n theta = math.acos(dotProduct(v1, v2) / v2Size)\n return theta", "def angle(*args):\n if len(args) < 1:\n return 0.0\n elif len(args) == 1:\n return np.arctan2(args[0][1], args[0][0])\n else:\n v1 = args[0].flatten()\n v2 = args[1].flatten()\n return np.arccos(np.dot(v1, v2) / (norm(v1) * norm(v2)))", "def angle(self, vector):\n\n return (math.degrees(math.acos((self.dot(vector) / (self.magnitude() *\n vector.magnitude())))))", "def vector_angle(v1, v2):\n cos_theta = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)\n # Clip ensures that cos_theta is within -1 to 1 by rounding say -1.000001 to -1 to fix numerical issues\n angle = np.arccos(np.clip(cos_theta, -1, 1))\n\n return angle", "def angle_between_vectors(vector1,vector2):\n value = np.sum(np.multiply(vector1, vector2)) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))\n if (value<-1) | (value>1):\n value = np.sign(value)\n angle = np.arccos(value)\n return angle", "def angle(vec1, vec2):\n assert vec1.shape == vec2.shape\n \n cos_vec = np.inner(vec1, vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2))\n angle = math.acos(cos_vec)\n in_deg = math.degrees(angle)\n if in_deg >= 90:\n return (180-in_deg)\n return in_deg", "def angle_between_vectors(a, b):\n return math.acos(dot_product(a, b) / (length(a) * length(b)))", "def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle", "def vector_angle_finder(vect_1, vect_2):\n theta = np.arccos(np.dot(vect_1, vect_2) / (magnitude_vect(vect_1) * magnitude_vect(vect_2)))\n angle = theta * 180 / math.pi\n return angle", "def vec_angle_rad(v1,v2):\r\n \r\n c = np.dot(v1,v2)/(vector_len(v2)* vector_len(v2))\r\n return math.acos(c)", "def angle(a,b):\n return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))", "def vec_angle_deg(v1,v2):\r\n \r\n return math.degrees(vec_angle_rad(v1,v2))", "def angle_vecs(vec1,vec2):\n angle=np.arccos(np.dot(vec1,vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)))\n return angle", "def cal_angle_between_two_vectors(vec_1, vec_2):\n unit_vec_1 = vec_1 / np.linalg.norm(vec_1)\n unit_vec_2 = vec_2 / np.linalg.norm(vec_2)\n dot_product = np.dot(unit_vec_1, unit_vec_2)\n \n return np.arccos(dot_product) / np.pi * 180", "def angleBetween(v1, v2):\n v1_u = unitVector(v1)\n v2_u = unitVector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))", "def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))", "def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))", "def 
vector_angle(v):\n assert len(v) == 2\n x, y = v\n return np.arctan2(y, x)", "def calculate_vector_angle(vector_1, vector_2):\n dot = dot_product(vector_1, vector_2)\n cos_angle = float(dot / (two_norm(vector_1) * two_norm(vector_2)))\n # Buffer for floating point errors\n if 1.2 > cos_angle > 1:\n cos_angle = 1\n elif -1.2 < cos_angle < -1:\n cos_angle = -1\n elif -1.2 > cos_angle or 1.2 < cos_angle:\n raise KeypointError(\"Ratio for angle is outside of the domain.\")\n if cos_angle > 0:\n multiplier = 1\n else:\n multiplier = -1\n angle_of_interest = (180 - math.degrees(math.acos(cos_angle))) * multiplier\n return angle_of_interest", "def angle(p1, p2):\n return dot(p1, p2)", "def angle_between(v1, v2):\n return np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))", "def AngleBetween(a, b):\n r = a.Length() * b.Length()\n if r < 1.0e-8:\n return BadVectorError()\n dot = (a.x*b.x + a.y*b.y + a.z*b.z) / r\n if dot <= -1.0:\n return 180.0\n if dot >= +1.0:\n return 0.0\n return math.degrees(math.acos(dot))", "def angle(v1, v2, acute=True):\n angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n if acute == True:\n return angle\n else:\n return 2 * np.pi - angle", "def angle_between_vectors_degrees(u, v):\n return np.degrees(\n math.acos(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))))", "def angle_between_vectors(vect_ref, vect):\n\n c = np.dot(vect_ref.T, vect) / (np.linalg.norm(vect_ref) * np.linalg.norm(vect))\n angle = np.arccos(np.clip(c, -1, 1))\n\n return angle", "def angle_between(v1: Vec2, v2: Vec2):\n v = dir_vector(v1, v2)\n a = atan2(v.y, v.x)\n if a < 0:\n a = 2 * pi + a\n return a", "def angle_between_vectors_degrees(u, v):\n a = np.dot(u, v)\n b = np.linalg.norm(u)\n c = np.linalg.norm(v)\n d = a / (b* c)\n if d > 1:\n d = 1\n if d < -1:\n d = -1\n e = acos(d)\n f = np.degrees(e)\n return f", "def vertical_angle(A, B):\n if A is None or B is None:\n return None\n return degrees(atan2(B[1] - A[1], B[0] - A[0]) - pi / 2)", "def calcul_angle_vector(vec1, vec2):\n \n try:\n div=(vec1[0]*vec2[0]+vec1[1]*vec2[1]+vec1[2]*vec2[2])/(distance(vec1,[0,0,0])*distance(vec2,[0,0,0]))\n if div>1:\n div=1\n if div<-1:\n div=-1\n #KC#CG# tranlation to degrees\n angle=180/math.pi*math.acos(div)\n except:\n print vec1\n print vec2\n print (vec1[0]*vec2[0]+vec1[1]*vec2[1]+vec1[2]*vec2[2])/(distance(vec1,[0,0,0])*distance(vec2,[0,0,0]))\n return angle", "def angle_between_vectors(x, y):\n first_step = abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]) / (\n np.sqrt(x[0]**2 + x[1]**2 + x[2]**2) *\n np.sqrt(y[0]**2 + y[1]**2 + y[2]**2))\n second_step = np.arccos(first_step)\n return (second_step)", "def angle_between_vectors(self, u, v):\n vec1_unit = self.get_unit_vector(u)\n vec2_unit = self.get_unit_vector(v)\n return np.arccos(np.clip(np.dot(vec1_unit, vec2_unit), -1.0, 1.0)) * (180/math.pi)", "def angle_between(vec1, vec2, radian=True):\n cos = np.dot(vec1, vec2) / np.linalg.norm(vec1) / np.linalg.norm(vec2)\n angle = np.arccos(np.clip(cos, -1, 1))\n if not radian:\n angle = angle / np.pi * 180\n return angle", "def _angle(*vectors):\n if len(vectors) == 1:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[0][1], vectors[0][0]))\n elif len(vectors) == 2:\n return DubinsUAV2D._sawtooth(np.arctan2(vectors[1][1], vectors[1][0]) - np.arctan2(vectors[0][1], vectors[0][0]))\n else:\n raise AttributeError()", "def angle(p1, p2):\n x_dist = p2[0] - p1[0]\n y_dist = p2[1] - p1[1]\n return math.atan2(-y_dist, x_dist) % (2 * math.pi)", "def get_angle(vert1, 
vert2):\n x_axis = np.array([1, 0])\n input_axis = vert2 - vert1\n input_axis = input_axis / np.linalg.norm(input_axis)\n return math.degrees(np.arccos(np.dot(x_axis, input_axis)))", "def angle(v1,v2, deg = False):\n # v1.v2 = ||v1||||v2|| cos(angle) => angle = arcos(v1.v2/||v1||||v2||)\n # see more: http://www.wikihow.com/Find-the-Angle-Between-Two-Vectors\n # tested with http://codereview.stackexchange.com/a/54413\n if deg: return np.rad2deg(np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))) # *180.0/np.pi\n return np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))", "def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])", "def angle(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n from math import acos\n return acos(self.dot(vec2) / (self.magnitude() * vec2.magnitude()))", "def compute_angle(self, direction):\n scaled_cosine = self.w1.dot(direction) # ||direction|| cos(theta)\n scaled_sine = self.w2.dot(direction) # ||direction|| sin(theta)\n return np.arctan2(scaled_sine, scaled_cosine)", "def angle_btw(v1, v2):\n cos_ang = np.dot(v1, v2)\n sin_ang = np.linalg.norm(np.cross(v1, v2))\n return np.arctan2(sin_ang, cos_ang) * 180 / math.pi", "def angle_between(a, b):\n from math import acos\n return acos( dot_product(a, b) / (magnitude(a) * magnitude(b)) )", "def angle(firstPoint, secondPoint):\n\txDiff = secondPoint.x - firstPoint.x\n\tyDiff = secondPoint.y - firstPoint.y\n\treturn math.degrees(math.atan2(yDiff, xDiff))", "def get_vec_angle(vec1: List, vec2: List) -> Union[float, None]:\n if np.linalg.norm(np.array(vec1)) == 0 or np.linalg.norm(np.array(vec2)) == 0:\n warnings.warn(\"Do not input 0 vector\")\n return\n\n diff_degree = np.dot(np.array(vec1), np.array(vec2))\n diff_degree /= np.linalg.norm(np.array(vec1))\n diff_degree /= np.linalg.norm(np.array(vec2))\n diff_degree = np.clip(diff_degree, -1, 1)\n diff_degree = np.arccos(diff_degree) * 180 / np.pi\n return diff_degree", "def angle_between(x1: float, y1: float, x2: float, y2: float) -> float:\n dx = x2 - x1\n dy = y2 - y1\n\n # We return negative because pyglet and math treat rotation differently\n return -math.atan2(dy, dx)", "def angle(self, vec, unit='rad'):\n raise NotImplementedError('angle not implemented for VectorArrays')", "def __get_angle(self, names, vecA, vecB):\n pivot = max(names, key=names.count)\n\n if names[0] != pivot: # Atoms needs to be order to pick vectors correctly\n vecA = vecA * -1\n\n if names[2] != pivot:\n vecB = vecB * -1\n\n radians = vecA.AngleTo(vecB)\n angle = 180 / math.pi * radians\n\n return angle", "def angle(pt_a, pt_b):\n x1, y1 = pt_a\n x2, y2 = pt_b\n return atan2(y2-y1, x2-x1)", "def vec2angle(vec):\n return round(atan2(vec[1], vec[0]), 3)", "def angle(self):\n self._normalise()\n norm = np.linalg.norm(self.vector)\n return self._wrap_angle(2.0 * atan2(norm,self.scalar))", "def angle(a: Point, b: Point) -> int:\n ang = math.degrees(math.atan2(b.y - a.y, b.x - a.x)) + 90\n return ang + 360 if ang < 0 else ang", "def get_vector(a, b):\n dx = float(b[0] - a[0])\n dy = float(b[1] - a[1])\n\n distance = math.sqrt(dx ** 2 + dy ** 2)\n\n if dy > 0:\n angle = math.degrees(math.atan(-dx / dy))\n elif dy == 0:\n if dx < 0:\n angle = 90.0\n elif dx > 0:\n angle = -90.0\n else:\n angle = 0.0\n else:\n if dx < 0:\n angle = 180 - math.degrees(math.atan(dx / dy))\n elif dx > 0:\n angle = -180 - math.degrees(math.atan(dx / dy))\n else:\n angle = 180.0\n\n return distance, angle", "def angle_between_vectors(u, v):\r\n mag_u = 
math.sqrt(u[0]**2 + u[1]**2 + u[2]**2)\r\n mag_v = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)\r\n dot_prod = u[0] * v[0] + u[1] * v[1] + u[2] * v[2]\r\n return math.acos(dot_prod/(mag_u*mag_v))", "def angle(p1, p2):\n dx = p2[0] - p1[0]\n dy = p2[1] - p1[1]\n if dx == 0:\n if dy == 0:\n return 0\n return 90\n alpha = math.atan(dy / dx) * 180 / math.pi\n if alpha < 0:\n alpha = 180 - alpha\n return alpha", "def angle_between(v1, v2):\n v = np.array(v1)\n w = np.array(v2)\n\n norm_v = norm(v)\n norm_w = norm(w)\n\n cos_angle = np.around(np.dot(v, w) / norm_v / norm_w, PRECISION)\n\n if not -1 <= cos_angle <= 1:\n return None\n else:\n return np.around(np.arccos(cos_angle) * 360 / 2 / np.pi, PRECISION)", "def test_vectors_angle(self):\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def angle_to( self, vector3 ):\n # make sure neither vector is zero-length\n sm = self.magnitude\n vm = vector3.magnitude\n if abs(sm) < self.EPSILON or abs(vm) < self.EPSILON:\n raise ZeroDivisionError(\n \"can't calculate angle between zero-length vectors!\" )\n \n # calculation will fail if vectors have same heading\n # catch error and return zero\n try:\n return math.degrees( math.acos(self.dot(vector3) / (sm * vm)) )\n except ValueError:\n # test whether direction is same or opposite\n if Vector3( self ).add( vector3 ).magnitude < sm:\n return 180.0\n return 0.0", "def _angle(u, v, w, d='+'):\n vu = np.arctan2(u[1] - v[1], u[0] - v[0])\n vw = np.arctan2(w[1] - v[1], w[0] - v[0])\n phi = vw - vu\n if phi < 0:\n phi += 2 * np.pi\n if d == '-':\n phi = 2 * np.pi - phi\n return np.round(phi, 6)", "def get_angle_between_vectors(self, A, B):\n\t\tdot_prod = A[0]*B[0] + A[1]*B[1]\n\t\tlen_A = math.sqrt(A[0]**2 + A[1]**2)\n\t\tlen_B = math.sqrt(B[0]**2 + B[1]**2)\n\n\t\treturn math.acos(dot_prod / (len_A + len_B))", "def angle_between(v2, v1):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n result = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n if np.isnan(result):\n if abs(v1_u + v2_u) < .5 * (abs(v1_u) + abs(v2_u)):\n return np.pi\n else:\n return 0.0\n if Left( [v2[1],v2[3]], [0,0], [v1[1],v1[3]] ):\n return 2*np.pi - result\n return result", "def getangle(p1, p2):\n\treturn atan2( p2[1]-p1[1], p2[0]-p1[0] )", "def angle(point1, point2):\n return atan2(point2.y() - point1.y(), point2.x() - point1.x())", "def get_interior_angle(vec0, vec1):\n angle = np.math.atan2(np.linalg.det([vec0, vec1]), np.dot(vec0, vec1))\n degrees = abs(np.degrees(angle))\n # Min and max should be between 0° an 90°.\n degrees = min(degrees, 180.0 - degrees)\n return degrees", "def get_angle(v1,v2) :\n\n if (np.linalg.norm(v1)*np.linalg.norm(v2)) != 0 : \n cosangle = np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n cosangle = np.maximum(-1,np.minimum(1, cosangle))\n angle = np.arccos(cosangle) \n if np.cross(v1,v2) < 0 :\n angle = 2*np.pi - angle \n return angle\n return None", "def angle(self, vec, unit='rad'):\n if not isinstance(vec, self.__class__):\n raise TypeError('Angle operand must be of class {}'\n .format(self.__class__.__name__))\n if 
unit not in ['deg', 'rad']:\n raise ValueError('Only units of rad or deg are supported')\n\n denom = self.length * vec.length\n if denom == 0:\n raise ZeroDivisionError('Cannot calculate angle between '\n 'zero-length vector(s)')\n\n ang = np.arccos(self.dot(vec) / denom)\n if unit == 'deg':\n ang = ang * 180 / np.pi\n return ang", "def angle(dx, dy):\n\n return math.atan2(dy, dx)", "def atan2_vec(vector):\n return -np.arctan2(vector[1], vector[0])", "def angle_in_degrees(x, y):\n return math.atan2(y, x) / math.pi * 180", "def test_angle_between_vectors():\n v = np.array([1, 0, 0])\n a = np.array([0, 1, 0, np.pi / 2])\n R = pr.matrix_from_axis_angle(a)\n vR = np.dot(R, v)\n assert_almost_equal(pr.angle_between_vectors(vR, v), a[-1])\n v = np.array([0, 1, 0])\n a = np.array([1, 0, 0, np.pi / 2])\n R = pr.matrix_from_axis_angle(a)\n vR = np.dot(R, v)\n assert_almost_equal(pr.angle_between_vectors(vR, v), a[-1])\n v = np.array([0, 0, 1])\n a = np.array([1, 0, 0, np.pi / 2])\n R = pr.matrix_from_axis_angle(a)\n vR = np.dot(R, v)\n assert_almost_equal(pr.angle_between_vectors(vR, v), a[-1])", "def angle(v,w):\n cosx = dot_product(v,w) / (length(v) * length(w))\n #det = determinant(A,B)\n rad = math.acos(cosx) # in radians\n return rad\n #return rad*180/math.pi # returns degrees", "def angle_2D(v):\n len_v=(v[0]**2+v[1]**2)**(0.5)\n if len_v==0:\n return 0\n ret = math.acos(v[0]/len_v)\n if v[1]<0:\n ret=6.283185307179586-ret\n return ret", "def _get_angle(point1, point2):\n ydelta = point2[0] - point1[0]\n xdelta = point2[1] - point1[1]\n if xdelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arcsin(ydelta / hypot)\n elif ydelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arccos(xdelta / hypot)\n else:\n theta = np.arctan(ydelta / xdelta)\n return theta", "def calc_angle(v1, v2, v3):\n v1 = v1 - v2\n v3 = v3 - v2\n return v1.angle(v3)", "def angle( nt1, nt2, nt3 ):\n if vector(nt1, nt2) == [0,0]:\n print(\"nt1\", nt1.seqpos, \" at \", nt1.x, nt1.y, \" is at the same position as nt2\", nt2.seqpos)\n if vector(nt2, nt3) == [0,0]:\n print(\"nt2\", nt2.seqpos, \" at \", nt2.x, nt2.y, \" is at the same position as nt3\", nt3.seqpos)\n #print(vector(nt1, nt2), vector(nt2, nt3))\n if vectors_close(vector(nt1, nt2), vector(nt2, nt3)):\n # These vectors are identical and that is messing with the ability to call two things parallel?\n return 180.0\n return 180.0 - math.degrees(math.acos(dot(vector(nt1, nt2), vector(nt2, nt3)) / (mod(vector(nt1, nt2)) * mod(vector(nt2, nt3)))))", "def calculate_angle(start: tuple, end: tuple):\n radians = -math.atan2(end[0] - start[0], end[1] - start[1])\n return math.degrees(radians) % 360", "def angle_of_vector(vector):\n z = complex(*vector[:2])\n if z == 0:\n return 0\n return np.angle(complex(*vector[:2]))", "def angle(o1,o2):\n\n o1 = np.array(o1)\n o2 = np.array(o2)\n\n o1a = o1[0:3]\n o1b = o1[3:6]\n \n o2a = o2[0:3]\n o2b = o2[3:6]\n\n norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)\n norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)\n\n dot_a = np.dot(o1a,o2a) / norm_a\n dot_b = np.dot(o1b,o2b) / norm_b\n \n if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:\n dot_a = 1.0\n \n if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:\n dot_b = 1.0\n\n angle_a = np.arccos(dot_a) * (180.0 / np.pi)\n angle_b = np.arccos(dot_b) * (180.0 / np.pi)\n\n return (angle_a, angle_b)", "def angle(x, y, deg=False):\n rad_angle = np.arccos(np.dot(x, y)/ (norm(x)*norm(y)))\n if deg:\n return rad_angle*(180.0/np.pi)\n 
else:\n return rad_angle", "def angle(p0, p1, prv_ang=0):\r\n ang = math.atan2(p0[1] - p1[1], p0[0] - p1[0])\r\n a0 = (ang - prv_ang)\r\n a0 = a0 % (PI * 2) - PI\r\n return a0", "def rotate(vector, angle):\n return np.cos(angle) * vector[0] + np.sin(angle) * vector[1], \\\n -np.sin(angle) * vector[0] + np.cos(angle) * vector[1]", "def angle(self):\n return atan2(self.v.y, self.v.x)", "def angle(self, factor):\n n1 = self.getNormalizedVector()\n n2 = factor.getNormalizedVector()\n\n # Determine angle between the two vectors.\n cos_angle = n1.scalarProduct(n2)\n angle = np.arccos(cos_angle)\n # Edoardo: numpy.arccos() always returns an angle in radians in [0, pi].\n\n # Mark's version:\n # By convention always return the smaller angle.\n # while angle > 2.0 * np.pi:\n # angle -= 2.0 * np.pi\n\n # if angle > np.pi:\n # angle = 2.0 * np.pi - angle\n\n return angle", "def calculate_angle(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.atan2(dy, dx) * 180.0 / math.pi", "def deltaAngle(x, y):\n return math.atan2(math.sin(x-y), math.cos(x-y))", "def angleTo(x1, y1, x2, y2):\n assert not (x1 == 0 and y1 == 0) and not (x2 == 0 and y2 == 0), \"neither point should be the origin\"\n if x1 == x2:\n if y1 < y2:\n return math.pi / 2\n elif y1 == y2:\n return 0\n return math.pi * 3 / 2\n dx, dy = x2 - x1, y2 - y1\n rawDeg = math.atan(dy / dx)\n if dx < 0:\n rawDeg += math.pi\n return rawDeg % (math.pi * 2)", "def addVectors((angle1, length1), (angle2, length2)):\n x = math.sin(angle1) * length1 + math.sin(angle2) * length2\n y = math.cos(angle1) * length1 + math.cos(angle2) * length2\n length = math.hypot(x,y)\n angle = 0.5 * math.pi - math.atan2(y,x)\n return (angle, length)", "def test_vectors_angle2(self):\n\n # Example 1.4\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle2_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle2_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def get_exact_angle(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n return math.atan2(dy,dx)", "def get_angle(p0, p1=np.array([0, 0]), p2=None):\n if p2 is None:\n p2 = p1 + np.array([1, 0])\n v0 = np.array(p0) - np.array(p1) \n v1 = np.array(p2) - np.array(p1)\n\n angle = np.math.atan2(np.linalg.det([v0,v1]),np.dot(v0,v1))\n return np.degrees(angle)", "def angle_between(i1, j1, i2, j2):\n\n dot_product = i1 * i2 + j1 * j2\n magnitude1 = np.sqrt(i1 ** 2 + j1 ** 2)\n magnitude2 = np.sqrt(i2 ** 2 + j2 ** 2)\n\n theta = np.arccos(dot_product / (magnitude1 * magnitude2))\n\n return np.rad2deg(theta).round(3)", "def angle(self):\n v = self.p1 - self.p0\n return atan2(v.y, v.x)", "def compute_angle_v2v(v1, v2, v3=None):\n\n alpha = math.acos(dot_product(v1, v2) / (vlength(v1)*vlength(v2)))\n if v3 is not None:\n cross = cross_product(v2, v1)\n if dot_product(cross,v3) > 0.0:\n return 2*math.pi-alpha\n\n return alpha", "def einsum_angle_between (vector_array_1, vector_array_2 ):\r\n\r\n # diagonal of dot product\r\n diag = np.clip (np.einsum ('ij,ij->i', vector_array_1, vector_array_2 ), -1, 1 )\r\n\r\n return np.arccos (diag )", "def get_angle_between(self, other):\n cross = 
self.x*other[1] - self.y*other[0]\n dot = self.x*other[0] + self.y*other[1]\n return math.atan2(cross, dot)", "def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))" ]
[ "0.7984714", "0.7903248", "0.7839204", "0.7763829", "0.76707727", "0.7667527", "0.7662782", "0.76549405", "0.76480985", "0.76240003", "0.7578548", "0.75698555", "0.7558118", "0.75398844", "0.7527476", "0.75202245", "0.7511977", "0.7507649", "0.7465797", "0.7461353", "0.7453873", "0.74406946", "0.73900396", "0.7370785", "0.7348387", "0.7317723", "0.73062503", "0.7265029", "0.7238063", "0.71964705", "0.7191882", "0.7171148", "0.71665585", "0.71591336", "0.71183956", "0.71130466", "0.71077657", "0.7097901", "0.707303", "0.70718384", "0.70680016", "0.7053709", "0.7048839", "0.7029809", "0.70052254", "0.70039314", "0.7000171", "0.6997325", "0.6994539", "0.6985689", "0.69848835", "0.6973893", "0.69309044", "0.6923165", "0.6914346", "0.69100124", "0.6906545", "0.6895152", "0.6889222", "0.68856084", "0.6880063", "0.6879245", "0.6878366", "0.6876012", "0.6870236", "0.686051", "0.6847197", "0.68429124", "0.68270737", "0.68201727", "0.6810558", "0.67988443", "0.6797911", "0.6796017", "0.6775526", "0.6759436", "0.67440325", "0.6743426", "0.67310756", "0.6721902", "0.6716996", "0.6704175", "0.6703679", "0.6679802", "0.6671878", "0.66586334", "0.66575676", "0.6653739", "0.66514426", "0.66300833", "0.6593634", "0.6586078", "0.65817183", "0.6570283", "0.6561615", "0.6561602", "0.6560523", "0.65549946", "0.6538979", "0.6534281" ]
0.7572855
11
Sets the desired capacity of the underlying ASG directly. Note that this is for internal control; for scaling purposes, please use scale() instead.
def set_desired_capacity(self, new_desired_capacity): scale_out = new_desired_capacity - self.desired_capacity assert scale_out >= 0 if scale_out == 0: return CompletedFuture(False) remaining_instances = self.client.get_remaining_instances(self.resource_group, self.instance_type) futures = [] for scale_set in sorted(self.scale_sets.values(), key=lambda x: (x.priority, x.name)): if scale_set.capacity < _SCALE_SET_SIZE_LIMIT: if self.slow_scale: new_group_capacity = scale_set.capacity + 1 else: new_group_capacity = min(_SCALE_SET_SIZE_LIMIT, scale_set.capacity + scale_out, scale_set.capacity + remaining_instances) if scale_set.provisioning_state == 'Updating': logger.warn("Update of {} already in progress".format(scale_set.name)) continue if scale_set.provisioning_state == 'Failed': logger.error("{} failed provisioning. Skipping it for scaling.".format(scale_set.name)) continue scale_out -= (new_group_capacity - scale_set.capacity) remaining_instances -= (new_group_capacity - scale_set.capacity) # Update our cached version self.scale_sets[scale_set.name].capacity = new_group_capacity futures.append(self.client.update_scale_set(scale_set, new_group_capacity)) logger.info("Scaling Azure Scale Set {} to {}".format(scale_set.name, new_group_capacity)) if scale_out == 0 or remaining_instances == 0: break if remaining_instances == 0: logger.warning("Out of quota for {}!".format(self.instance_type)) if scale_out > 0: logger.error("Not enough scale sets to reach desired capacity {} for {}".format(new_desired_capacity, self)) self.desired_capacity = new_desired_capacity - scale_out logger.info("ASG: {} new_desired_capacity: {}".format(self, new_desired_capacity)) return TransformingFuture(True, AllCompletedFuture(futures))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_capacity(self, cap):\n self._capacity.type = 'value'\n self._capacity._value = float(cap) # TODO getter/setter", "def set_capacity(self, cap):\n return self.get_interaction().set_capacity(cap)", "def set_capacity(self, capacity):\r\n params = {\r\n 'AutoScalingGroupName' : self.name,\r\n 'DesiredCapacity' : capacity,\r\n }\r\n req = self.connection.get_object('SetDesiredCapacity', params,\r\n Request)\r\n self.connection.last_request = req\r\n return req", "def capacity(self, capacity):\n\n self._capacity = capacity", "def scale_asg(asg_name, current_desired_capacity, new_desired_capacity, new_max_size):\n logger.info('Setting asg desired capacity from {} to {} and max size to {}...'.format(current_desired_capacity, new_desired_capacity, new_max_size))\n if not app_config['DRY_RUN']:\n response = client.update_auto_scaling_group(\n AutoScalingGroupName=asg_name,\n DesiredCapacity=new_desired_capacity,\n MaxSize=new_max_size)\n if response['ResponseMetadata']['HTTPStatusCode'] != requests.codes.ok:\n logger.info('AWS scale up operation did not succeed. Exiting.')\n raise Exception('AWS scale up operation did not succeed. Exiting.')\n else:\n logger.info('Skipping asg scaling due to dry run flag set')", "def capacity(self, value: typing.Union[str, int, None]):\n self._properties[\"capacity\"] = _types.integer_or_string(value)", "def expandable_capacity(self, expandable_capacity):\n\n self._expandable_capacity = expandable_capacity", "def capacity_factor(self, value: float) -> None:\n # State S, I, E, SE, or EE\n self._capacity_factor = value", "def _resize(self, new_cap):\n new_array = ba(new_cap)\n\n for i in range(self.count):\n new_array[i] = self.the_array[i]\n\n self.the_array = new_array\n self.capacity = new_cap", "def physical_capacity(self, physical_capacity):\n\n self._physical_capacity = physical_capacity", "def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial", "def _resize(self, capacity):\n B = self._make_array(capacity)\n for i in range(self._size):\n B[i] = self._data[i]\n self._data = B\n self._capacity = capacity", "def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units", "def capacity(self):\n raise NotImplementedError()", "def logical_capacity(self, logical_capacity):\n\n self._logical_capacity = logical_capacity", "def __init__(__self__, *,\n active_capacity: int,\n capacity: Optional[int] = None,\n scale_type: Optional[str] = None):\n pulumi.set(__self__, \"active_capacity\", active_capacity)\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if scale_type is not None:\n pulumi.set(__self__, \"scale_type\", scale_type)", "def accl_scale_selection(self):\r\n\t\tbus.write_byte_data(LSM330_ACCL_ADDRESS, LSM330_CTRL_REG4_A, LSM330_ACCL_RANGE_2G)", "def setscaling(self, scaling):\n\n self.__scaling = scaling", "def _grow(self):\n self.capacity *= self.factor\n temp = [None] * self.capacity\n for i in range(self.size):\n temp[i] = self.store[i]\n self.store = temp", "def new_capacity_rule(mod, g, p):\n return 0", "def user_capacity(self, user_capacity: SmartSsdUserCapacity):\n\n self._user_capacity = user_capacity", "def grow(self):\n self.mass *= 1.1", "def allocate(self, val):\n self.at_options.allocate = 1 if val else 0", "def capacity_used(self):\n raise NotImplementedError()", "def capacity_runway(self, capacity_runway):\n\n self._capacity_runway = capacity_runway", "def capacity(self):\n return self._cap", "def _shrink(self):\n self.capacity = 
round(self.capacity / self.factor)\n temp = [None] * self.capacity\n for i in range(self.capacity):\n temp[i] = self.store[i]\n self.store = temp", "def set_constraint(self, g, g_min, g_max):\n self.g += g\n self.g_min += g_min\n self.g_max += g_max", "def __resize(self, new_capacity):\r\n B = self.make_array(new_capacity) # create new array\r\n for k in range(self.n):\r\n B[k] = self.A[k]\r\n\r\n self.A = B\r\n self.capacity = new_capacity", "def trace_set_buffer_capacity(self, size):\n cmd = enums.JLinkTraceCommand.SET_CAPACITY\n data = ctypes.c_uint32(size)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to set trace buffer size.')\n return None", "def grow(self):\n self.capacity = self.capacity * 2\n self.rehash()", "def capacitygroup_group():", "def resize_memory(self, new_size=None):\n\n self.container.capacity = new_size", "def _resize(self, cap): # nonpublic utitity\n B = self._make_array(cap) # new (bigger) array\n for k in range(self._size): # for each existing value\n B[k] = self._Array[k]\n self._Array = B # use the bigger array\n self._capacity = cap", "def set_autoscale_group_capacity(group, num_instances,\n attributes=('min_size', 'max_size', 'desired_capacity')):\n conn = boto.ec2.autoscale.connect_to_region(\"us-east-1\")\n group = conn.get_all_groups(names=[group])[0]\n for attrib in attributes:\n setattr(group, attrib, num_instances)\n if attributes:\n group.update()", "def resize(self, cap):\n temp_pq = [0] * (cap + 1)\n temp_qp = [-1] * (cap + 1)\n temp_keys = [None] * (cap + 1)\n for i in range(cap + 1):\n temp_pq[i] = self.__pq[i]\n temp_qp[i] = self.__qp[i]\n temp_keys[i] = self.__keys[i]\n self.__pq = temp_pq\n self.__qp = temp_qp\n self.__keys = temp_keys\n self._max_n = cap\n if self.__n > self._max_n:\n self.__n = self._max_n", "def _resize(self, new_capacity):\n temp_array = self.make_array(new_capacity)\n for i in range(self.n):\n temp_array[i] = self.original_array[i]\n self.original_array = temp_array\n self.capacity = new_capacity", "def SetAllowUpscaling(self, allow):\n self._allow_upscaling = allow\n self.Refresh()", "def init_capacities(self, G):\n\n if self.capacity_function is not None:\n # Initialize all the capacities, either pass topo object or don't depending on signature\n if len(inspect.signature(self.capacity_function).parameters) == 2:\n for (u, v) in G.edges:\n G.edges[u, v]['capacity'] = self.capacity_function(u, v)\n elif len(inspect.signature(self.capacity_function).parameters) == 3:\n for (u, v) in G.edges:\n G.edges[u, v]['capacity'] = self.capacity_function(u, v, self)\n return G", "def __init__(self, voltage=6, capacity=220, stateBounds = np.array([0.2,0.9]), storedEnergy = None):\n\t\tself.energyCapacity = voltage*capacity\n\t\tself.storedEnergy = storedEnergy or self.energyCapacity/2.0\n\t\tself.minState = stateBounds[0]\n\t\tself.maxState = stateBounds[1]", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity", "def capacity_enlarge(self, k):\n count = 0\n idx = self.capacity - 1\n while count < k:\n left = self.tree[idx]\n right = priorityNode(0, None)\n insert_pos = self.tree.shape[0]\n self.tree = np.insert(self.tree, insert_pos, [left,right])\n idx += 1\n count += 1\n\n self.last_capacity = self.capacity # mark down the last capacity for adding operation\n self.capacity += k # Update the value of capacity", "def setGravity(self):\n self.fg = .5*self.mass", "def capacity_usage_details(self, capacity_usage_details):\n\n self._capacity_usage_details = 
capacity_usage_details", "def set_max_vehicle_capacity(self, new_max_vehicle):\n if(new_max_vehicle == None):\n self._logger.write(\"Error! new_max_vehicle cannot be a NoneType\")\n elif(type(new_max_vehicle) != int):\n self._logger.write(\"Error! new_max_vehicle must be of type int\")\n else:\n try:\n self._max_vehicle_capacity = new_max_vehicle\n except Exception as e:\n self._logger.write(\"Error! Could not set the new max_vehicle:\\n %s\" % e)", "def shrink(self):\n self.mass *= 0.8", "def __init__(__self__, *,\n capacity: Optional[int] = None,\n name: Optional[str] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def test_set_glass_capacity__with_invalid_numbers__returns_expected():\n glass = moet.create_glass(\"A\")\n with pytest.raises(ValueError):\n glass.capacity = -100", "def set_scaling(self, scaling):\n self.scaling = scaling\n self.eff_box_size = int(self.box_size*self.scaling+0.5)", "def _resize(self, cap):\n old = self._data\n self._data = [None] * cap\n walk = self._front\n for i in range(self._size):\n self._data[i] = old[walk]\n walk = (walk + 1) % len(old)\n self._front = 0", "def autoscale(self, A):\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)", "def set_allow_upscaling(self, allow):\n self.widget.setAllowUpscaling(allow)", "def set_allow_upscaling(self, allow):\n self.widget.SetAllowUpscaling(allow)", "def update_capacity(self, curr_velocity, time, angle):\n gained_energy = self.recharge_rate * time # KWh \n \n energy = self.motor_power(curr_velocity, angle) * time\n\n self.current_capacity += gained_energy - energy", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def setAllowUpscaling(self, allow):\n self._allow_upscaling = allow\n self.update()", "def SetAntLimit(cls, value=0):\n cls.antLimit = value", "def reserve(self, reserve):\n \n self._reserve = reserve", "def scale_positions_and_cell(self):\n\n taupscl = self.dt / self.taup\n stress = self.atoms.get_stress()\n old_pressure = self.atoms.get_isotropic_pressure(stress)\n scl_pressure = 1.0 - taupscl * self.compressibility / 3.0 * \\\n (self.pressure - old_pressure)\n\n #print \"old_pressure\", old_pressure\n #print \"volume scaling by:\", scl_pressure\n\n cell = self.atoms.get_cell()\n cell = scl_pressure * cell\n self.atoms.set_cell(cell, scale_atoms=True)", "def _create_capacity(self, m, comp, prod_name):\n name = comp.name\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n r = m.resource_index_map[comp][cap_res] # production index of the governing resource\n # production is always lower than capacity\n ## NOTE get_capacity returns (data, meta) and data is dict\n ## TODO does this work with, e.g., ARMA-based capacities?\n ### -> \"time\" is stored on \"m\" and could be used to correctly evaluate the capacity\n cap = comp.get_capacity(None, None, None, None)[0][cap_res] # value of capacity limit (units of governing resource)\n rule = partial(self._capacity_rule, prod_name, r, cap)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_capacity_constr'.format(c=name, r=cap_res), constr)\n # minimum production\n print('DEBUGG dispatchable?', comp.name, comp.is_dispatchable())\n if comp.is_dispatchable() == 'fixed':\n minimum = cap\n var = getattr(m, prod_name)\n values = var.get_values()\n for k in values:\n values[k] = cap\n var.set_values(values)\n else:\n minimum = 0 # -> for now just use 0, but fix this! XXX\n print('DEBUGG ... 
min:', minimum)\n rule = partial(self._min_prod_rule, prod_name, r, cap, minimum)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_minprod_constr'.format(c=name, r=cap_res), constr)", "def capacity(self):\n return self._capacity", "def initialize_supply(self):\n unit_count = 0\n for i in range(self.start_allocation[0 ] -1, self.start_allocation[1]):\n for j in range(len(self.capacity_list[i][1])):\n self.capacity_list[i][1][j] = 1\n unit_count += 1\n self.total_supply -= unit_count", "def set_attributes(self):\n for i, battery in enumerate(sorted(self.batteries.values(),\n key=operator.attrgetter(\"weight\"))):\n setattr(battery, \"cap\", self.caps[self.big_iterations][i])\n if self.caps[self.big_iterations][i] is 450:\n cost = 900\n elif self.caps[self.big_iterations][i] is 900:\n cost = 1350\n else:\n cost = 1800\n setattr(battery, \"cost\", cost)\n battery.capacity = self.caps[self.big_iterations][i]", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale):\n\n self._scale = scale", "def __init__(self, capacity=2):\r\n self._capacity = capacity\r\n self._data = [0] * self._capacity\r\n self._size = 0", "def test_set_glass_capacity__with_valid_numbers__returns_expected():\n glass = moet.create_glass(\"A\")\n numbers = [0, 1, 250, 0.0, 100.5]\n for number in numbers:\n glass.capacity = number\n assert glass.capacity == number", "def _assign_sizes(self):", "def capacity(self):\n return self._capacity", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "def minimum_health_capacity(self, minimum_health_capacity):\n self._minimum_health_capacity = minimum_health_capacity", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n self._logicalSize = 0\r\n # Track the capacity and fill value for adjustments later\r\n self._capacity = capacity\r\n self._fillValue = fillValue\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def _reallocate(self, new_capacity: int) -> None:\n # State S, I, E, SE, or EE\n new_list = [None] * new_capacity # type: List[T]\n self._copy_to_list(new_list)\n if self.count != new_capacity:\n count = self.count\n self._list = new_list\n self._physical_index_start = 0\n self._physical_index_end = count\n self._inverted = False\n else:\n self._list = new_list\n self._physical_index_start = 0\n self._physical_index_end = 0\n self._inverted = True", "def fill_up(self):\n self.fuel = self.gas_tank_size", "def new_capacity_rule(mod, prj, prd):\n return 0", "def scale(self):", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def resize(self, size):\n assert size >= 0 and size <= self._cap, \\\n \"invalid size[%d] for resize\" % (size)\n\n self._size = size", "def apply(self):\n self.grid_size = self.values[0]", "def set_size(self, new_bunch_size):\n self.bunch_size = new_bunch_size", "def Capacity(self) -> int:", "def setMass(self,mass):\n self.mass = mass", "def setScale(self, sx, sy=None, sz=None):\n self.transform.setScale(sx, sy, sz)", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is None:\n name = 'S0'\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is None:\n tier = 'Standard'\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", 
"def capacity(self):\n return str(int(self._properties.get('capacity')) * 1073741824)", "def set_grid(self,ug):\n self.grd=ug\n self.set_topology()", "def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale,bits=2)\r\n\t\tcurrentVal[3] = scale[0]\r\n\t\tcurrentVal[4] = scale[1]\r\n\t\tcurrentVal = self.binList2Dec(currentVal)\r\n\t\tself.write(reg, currentVal)", "def knapsack(items, capacity):\r\n pass", "def auto_scaling(self, auto_scaling):\n\n self.container['auto_scaling'] = auto_scaling", "def _set_group_resource(self, _g):\n\n if isinstance(_g, Server):\n return\n\n for _, sg in _g.subgroups.items():\n self._set_group_resource(sg)\n _g.vCPUs += sg.vCPUs\n _g.mem += sg.mem\n _g.local_volume_size += sg.local_volume_size", "def capacitygroup_update(cmd_ctx, cpc, capacitygroup, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_update(cmd_ctx, cpc, capacitygroup, options))", "def _resize_array(self, capacity):\n old_array = self._array\n self._array = [None] * capacity\n for index in range(self._size):\n self._array[index] = old_array[index]", "def maximum_over_capacity(self, maximum_over_capacity):\n self._maximum_over_capacity = maximum_over_capacity", "def capacity(self) -> Capacity:\n raw = self._call('GET', 'capacity')\n return Capacity.parse_raw(raw)", "def auto(self) -> 'Size':\n self.maximum = 'auto'\n return self", "def __init__(self, capacity, fillValue = None):\n \n self._items = list() \n self._fillValue = fillValue\n self._DEFAULT_CAPACITY = capacity\n self._logicalSize = 0 #as required by exercise 1\n \n \n for count in xrange(capacity):\n self._items.append(self._fillValue)", "def shell_allow_upscaling_changed(self, allow):\n self.set_allow_upscaling(allow)", "def shell_allow_upscaling_changed(self, allow):\n self.set_allow_upscaling(allow)", "def __init__(self, capacity=4):\n self.capacity = capacity\n self.size = 0\n self.table = [None] * capacity", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n family: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if family is not None:\n pulumi.set(__self__, \"family\", family)\n if name is not None:\n pulumi.set(__self__, \"name\", name)" ]
[ "0.77368546", "0.770329", "0.7405971", "0.71707654", "0.67850167", "0.6660137", "0.63342935", "0.6240899", "0.6176888", "0.61686575", "0.6136855", "0.6032943", "0.599189", "0.5988754", "0.5920193", "0.5911704", "0.58976525", "0.58910733", "0.58642304", "0.58624625", "0.5813705", "0.58040416", "0.5793481", "0.57781756", "0.5755223", "0.5740172", "0.57317895", "0.5729348", "0.5729046", "0.57021326", "0.5700215", "0.5695261", "0.5685428", "0.56822914", "0.5680668", "0.56618226", "0.5657546", "0.5642251", "0.56398827", "0.56371284", "0.56232667", "0.55963945", "0.55756855", "0.55718637", "0.5569157", "0.5541234", "0.5509835", "0.5479611", "0.547802", "0.5456984", "0.5449284", "0.5444192", "0.5443262", "0.5428451", "0.5423093", "0.5422737", "0.54160666", "0.5414535", "0.540167", "0.53993666", "0.5396018", "0.5378414", "0.537019", "0.53588074", "0.53588074", "0.5356916", "0.5354703", "0.5351363", "0.5348831", "0.5343877", "0.5340474", "0.5336617", "0.5334563", "0.533195", "0.5331178", "0.5324671", "0.5323347", "0.5322598", "0.53078175", "0.53054553", "0.5304289", "0.5288096", "0.528771", "0.5251378", "0.5249586", "0.5248257", "0.52412057", "0.5235549", "0.522002", "0.5218876", "0.52179307", "0.52042", "0.5196106", "0.5195702", "0.519379", "0.5187664", "0.51875496", "0.51875496", "0.5187507", "0.51769364" ]
0.6512873
6
Scale down the ASG by terminating the given node. Returns a future indicating when the request completes.
def scale_nodes_in(self, nodes): for node in nodes: self.nodes.remove(node) return self.terminate_instances(node.instance_id for node in nodes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def stop_node(request: web.Request) -> web.Response:\n req_ctx = RequestContext.parse_obj(request)\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n return await start_long_running_task(\n request,\n _stop_dynamic_service_with_progress,\n task_context=jsonable_encoder(req_ctx),\n path_params=path_params,\n app=request.app,\n service_uuid=f\"{path_params.node_id}\",\n fire_and_forget=True,\n )", "def ex_shutdown_node(self, node):\n # NOTE: This method is here for backward compatibility reasons after\n # this method was promoted to be part of the standard compute API in\n # Libcloud v2.7.0\n return self.stop_node(node=node)", "def finish_node(self, node_name):\n del self._execution_pool[node_name]", "def _shutdown_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.pop()\n self.__shutdown_service(conn, compose_fname, container_name)", "def reboot(self, node):", "def scale_down_application(asg_name):\n if_verbose(\"Scaling down %s.\" % asg_name)\n asg.set_desired_capacity(AutoScalingGroupName=asg_name, DesiredCapacity=0)", "def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()", "async def shutdown(self) -> int:", "async def shutdown_gracefully(self) -> None:", "async def shutdown_gracefully(self) -> None:", "def release_node(self, node):\n # use the lua script to release the lock in a safe way\n try:\n node._release_script(keys=[self.resource], args=[self.lock_key])\n except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):\n pass", "def shutdown_imagenode(self):\n multiprocessing.Process(daemon=True,\n args=((self.pid,)),\n target=self.shutdown_process_by_pid).start()\n sys.exit()", "def shutdown(self, *args):\n return _SALOMERuntime.SalomeNode_shutdown(self, *args)", "def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n 
time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()", "async def shutdown(self):", "def shutdown(self, *args):\n return _SALOMERuntime.PythonNode_shutdown(self, *args)", "def finish_shutdown(self, kernel_id, waittime=None, pollinterval=0.1, restart=False):", "def main():\n\n try:\n worker_id = get_arg(\"--worker-id\", assert_nonnegative_int)\n master_host = get_arg(\"--master-host\", assert_host)\n master_port = get_arg(\"--master-port\", assert_positive_int)\n scale = get_arg(\"--scale\", assert_pos_float)\n method = get_arg(\"--method\", assert_downscaling_method)\n load_backup = get_arg(\"--load-backup\", assert_bool, default=0)\n number_of_random_walkers = get_arg(\n \"--n-random-walkers\", assert_nonnegative_int, default=1)\n backup_size = get_arg(\n \"--backup-size\", assert_nonnegative_int, default=100)\n walking_iterations = get_arg(\n \"--walking-iterations\", assert_positive_int, default=1)\n\n except AssertionError as e:\n print_error(e)\n print_error(\n \"The downscaling worker expects the following arguments:\\n\"\n \"\\t--worker-id: The id of the worker\\n\"\n \"\\t--master-host: The host of the master\\n\"\n \"\\t--master-port: The port of the master\\n\"\n \"\\t--scale: The scale of the downscaled graph w.r.t. the input graph\\n\"\n \"\\t--method: The method to use for downscaling, `random_walk` or `random_edge`\\n\"\n \"\\t--load-backup: Should the worker load from a backup send by the master\\n\"\n \"\\t--n-random-walkers: Number of random walkers to start with\\n\"\n \"\\t--backup-size: Minimum size of the backup before it will be send to the master during a run, 0 if you want no backups\\n\"\n \"\\t--walking-iterations: The number of steps a random walker sets before the queue will be handled\\n\"\n )\n return\n\n Worker(worker_id, master_host, master_port, scale, method, load_backup,\n number_of_random_walkers, backup_size, walking_iterations)", "def test_cluster_downscale(self):\n logging.info(\"Adding units needed for downscaling test.\")\n self._add_unit(2)\n\n # Remove unit hosting at least one follower\n non_leader_unit = self._get_unit_hosting_ovn(leader=False)\n logging.info(\n \"Removing unit (%s) that hosts OVN follower server.\",\n non_leader_unit\n )\n\n non_leader_sb, non_leader_nb = self._get_server_ids(non_leader_unit)\n self._remove_unit(non_leader_unit)\n self._assert_servers_cleanly_removed(non_leader_sb, non_leader_nb)\n\n # Remove unit hosting at least one leader\n leader_unit = self._get_unit_hosting_ovn(leader=True)\n logging.info(\n \"Removing unit (%s) that hosts OVN leader server.\",\n leader_unit\n )\n\n leader_sb, leader_nb = self._get_server_ids(leader_unit)\n self._remove_unit(leader_unit)\n self._assert_servers_cleanly_removed(leader_sb, leader_nb)", "def _finalize_leaf(self, node):\n node.value = -self.shrinkage * node.sum_gradients / (\n node.sum_hessians + self.splitter.l2_regularization)\n self.finalized_leaves.append(node)", "def autoscale_cmd(timeout, max_count, min_count, batch_count,\n app_srv_ratio):\n while True:\n create_cnt, extra_servers = autoscale.scale(\n max_servers=max_count,\n min_servers=min_count,\n default_app_srv_ratio=app_srv_ratio,\n max_batch=batch_count)\n if create_cnt > 0:\n autoscale.create_n_servers(create_cnt, partition=None)\n\n if extra_servers:\n autoscale.delete_servers_by_name(extra_servers)\n\n time.sleep(timeout)", "def scale(options):\n\n # ONLY 
GCE is supported for scaling at this time\n cluster = gce_cluster_control(options)\n if options.test_k8s:\n k8s = k8s_control_test(options)\n else:\n k8s = k8s_control(options)\n\n slack_logger.addHandler(slack_handler(options.slack_token))\n if not options.slack_token:\n scale_logger.info(\n \"No message will be sent to slack, since there is no token provided\")\n\n scale_logger.info(\"Scaling on cluster %s\", k8s.get_cluster_name())\n\n nodes = [] # a list of nodes that are NOT critical\n for node in k8s.nodes:\n if node.metadata.name not in k8s.critical_node_names:\n nodes.append(node)\n\n # Shuffle the node list so that when there are multiple nodes\n # with same number of pods, they will be randomly picked to\n # be made unschedulable\n random.shuffle(nodes)\n\n # goal is the total number of nodes we want in the cluster\n goal = schedule_goal(k8s, options)\n\n scale_logger.info(\"Total nodes in the cluster: %i\", len(k8s.nodes))\n scale_logger.info(\n \"%i nodes are unschedulable at this time\", k8s.get_num_schedulable())\n scale_logger.info(\"Found %i critical nodes\",\n len(k8s.nodes) - len(nodes))\n scale_logger.info(\"Recommending total %i nodes for service\", goal)\n\n if confirm((\"Updating unschedulable flags to ensure %i nodes are unschedulable\" % max(len(k8s.nodes) - goal, 0))):\n update_unschedulable(max(len(k8s.nodes) - goal, 0), nodes, k8s)\n\n if goal > len(k8s.nodes):\n scale_logger.info(\n \"Resize the cluster to %i nodes to satisfy the demand\", goal)\n if options.test_cloud:\n resize_for_new_nodes_test(goal, k8s, cluster)\n else:\n slack_logger.info(\n \"Cluster resized to %i nodes to satisfy the demand\", goal)\n resize_for_new_nodes(goal, k8s, cluster)\n if options.test_cloud:\n shutdown_empty_nodes_test(nodes, k8s, cluster)\n else:\n # CRITICAL NODES SHOULD NOT BE SHUTDOWN\n shutdown_empty_nodes(nodes, k8s, cluster)", "def request_shutdown(self, restart=False):", "def test_kill_after_resize_call(self, exit_on_deadlock):\n # Test the executor resizing called before a kill arrive\n executor = get_reusable_executor(max_workers=2)\n executor.submit(kill_friend, (next(iter(executor._processes.keys())),\n .1))\n executor = get_reusable_executor(max_workers=1)\n assert executor.submit(id_sleep, 42, 0.).result() == 42", "def destroy_node(self, node):\n params = {'Action': 'TerminateInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_terminate_boolean(res)", "async def restart_node(request: web.Request) -> web.Response:\n\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n await director_v2_api.restart_dynamic_service(request.app, f\"{path_params.node_id}\")\n\n raise web.HTTPNoContent()", "def patch_resize(c, graph, node_select):\r\n return c", "def rescale(self, new_throughput):\n\t\treturn type(self)(self.item, self.recipe, new_throughput, self.per_process_outputs)", "def scale(self, sc):\n daskD.wait(self.client.map(_call_scale, self.vecDask, sc=sc, pure=False))\n return self", "def pcp_detach_node_gracefully(self, nid):\n\t\treturn self._pcp_detach_node(nid, True)", "def delayed_termination(func):\n @wraps_py2(func)\n def _delayed_termination(*args, **kwargs):\n with delay_termination():\n return func(*args, **kwargs)\n return _delayed_termination", "def do_delete(self):\n cluster_id = self.entity.cluster_id\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # If node belongs to a cluster, check size constraint\n # before deleting it\n 
cluster = cm.Cluster.load(self.context, cluster_id)\n current = no.Node.count_by_cluster(self.context, cluster_id)\n desired = current - 1\n result = su.check_size_params(cluster, desired, None, None, True)\n if result:\n return self.RES_ERROR, result\n\n # handle grace_period\n pd = self.data.get('deletion', None)\n if pd:\n grace_period = pd.get('grace_period', 0)\n if grace_period:\n eventlet.sleep(grace_period)\n\n res = self.entity.do_delete(self.context)\n\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # check if desired_capacity should be changed\n do_reduce = True\n params = {}\n pd = self.data.get('deletion', None)\n if pd:\n do_reduce = pd.get('reduce_desired_capacity', True)\n if do_reduce and res:\n params = {'desired_capacity': desired}\n cluster.eval_status(self.context, consts.NODE_DELETE, **params)\n\n if not res:\n return self.RES_ERROR, 'Node deletion failed.'\n\n return self.RES_OK, 'Node deleted successfully.'", "def converge_scaling_group(self, request):\n\n def can_converge(group, state):\n if state.paused:\n raise GroupPausedError(group.tenant_id, group.uuid, \"converge\")\n return state\n\n if tenant_is_enabled(self.tenant_id, config_value):\n group = self.store.get_scaling_group(\n self.log, self.tenant_id, self.group_id)\n return controller.modify_and_trigger(\n self.dispatcher,\n group,\n bound_log_kwargs(self.log),\n can_converge)\n else:\n request.setResponseCode(404)", "def test_resize_down_revert(self):\n # devstack's m1.tiny and m1.small have different size disks so we\n # can't use those as you can't resize down the disk. So we have to\n # create our own flavors.\n larger_flavor, smaller_flavor = self._create_resize_down_flavors()\n # Now create the server with the larger flavor.\n server_id = self._create_server(flavor=larger_flavor).id\n # get the starting quota now that we've created a server\n starting_usage = self._get_absolute_limits()\n # now resize down\n self.nova('resize',\n params='%s %s --poll' % (server_id, smaller_flavor))\n resize_usage = self._get_absolute_limits()\n # compare the starting usage against the resize usage; with counting\n # quotas in the server there are no reservations, so the\n # usage changes after the resize happens before it's confirmed.\n self._compare_quota_usage(starting_usage, resize_usage)\n # now revert the resize\n self.nova('resize-revert', params='%s' % server_id)\n # we have to wait for the server to be ACTIVE before we can check quota\n self._wait_for_state_change(server_id, 'active')\n # get the final quota usage which will be different from the resize\n # usage since we've reverted back *up* to the original flavor; the API\n # code checks quota again if we revert up in size\n revert_usage = self._get_absolute_limits()\n self._compare_quota_usage(resize_usage, revert_usage)", "def handle_termination(self):\n pass", "def autoscale_down(self) -> Optional[pulumi.Input['OceanAutoscalerAutoscaleDownArgs']]:\n return pulumi.get(self, \"autoscale_down\")", "def finish_shutdown(self, waittime=None, pollinterval=0.1, restart=False):", "async def adapt(self) -> None:\n if self._adapting: # Semaphore to avoid overlapping adapt calls\n return\n self._adapting = True\n status = None\n\n try:\n target = await self.safe_target()\n recommendations = await self.recommendations(target)\n\n if recommendations[\"status\"] != \"same\":\n self.log.append((time(), dict(recommendations)))\n\n status = recommendations.pop(\"status\")\n if status == \"same\":\n return\n if status == \"up\":\n await 
self.scale_up(**recommendations)\n if status == \"down\":\n await self.scale_down(**recommendations)\n except OSError:\n if status != \"down\":\n logger.error(\"Adaptive stopping due to error\", exc_info=True)\n self.stop()\n else:\n logger.error(\n \"Error during adaptive downscaling. Ignoring.\", exc_info=True\n )\n finally:\n self._adapting = False", "def shutdown(self):\n self.req_shutdown = True", "def shutdown(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Shutting down local node\"\n return ret\n\n __salt__[\"trafficserver.shutdown\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Shutdown local node\"\n return ret", "def autoscale_scale_down_non_service_tasks(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"autoscale_scale_down_non_service_tasks\")", "def main_endpoint(request, node_id):\n status = node.infotable['status']\n request.setResponseCode(status)\n\n latency = node.infotable['latency']\n if latency > 0:\n time.sleep(latency)\n\n node.make_requests()\n\n return node.node_id", "def scale_out(self, *args, **kwargs):\n pass", "def resume_scaling_group(self, request):\n group = self.store.get_scaling_group(\n self.log, self.tenant_id, self.group_id)\n return controller.resume_scaling_group(\n self.log, transaction_id(request), group, self.dispatcher)", "async def _shutdown(self, *args, **kwargs):\n return self", "async def release(self) -> None:\n ...", "async def release(self) -> None:\n ...", "async def release(self) -> None:\n ...", "async def test_early_exit(self):\n n = Node()\n run_task = asyncio.create_task(n.run_node())\n await asyncio.sleep(0)\n self.assertFalse(n.check_alive())\n n.exit_node()\n await n.wait_running()\n await n.wait_stopped()\n await run_task\n await self._check_exited_node(n)", "def shutdown(self) -> None:\n scalesets = Scaleset.search_by_pool(self.name)\n nodes = Node.search(query={\"pool_name\": [self.name]})\n if not scalesets and not nodes:\n logging.info(\"pool stopped, deleting: %s\", self.name)\n\n self.state = PoolState.halt\n self.delete()\n return\n\n for scaleset in scalesets:\n scaleset.state = ScalesetState.shutdown\n scaleset.save()\n\n for node in nodes:\n node.set_shutdown()\n\n self.save()", "def shutdownVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/shutdown\" % (node,vmid), post_data)\n return data", "def update(self) -> None:\n\n \n #If time to live is 0\n if self.ttl == 0:\n\n #Kill itself\n self.kill()\n return\n\n #Otherwise\n else:\n\n #Reduce time to live\n self.ttl -= 1\n\n #Call superclass update\n return super().update()", "def free_child(self):\n try:\n self.terminate()\n self.kill()\n self.wait()\n except:\n pass", "def downscale_resolution(self, downscale_resolution):\n\n self._downscale_resolution = downscale_resolution", "def stop(self: AutoScalingCluster, wait: bool = False, timeout: int = None) -> None:\n self.server.stop(wait=wait, timeout=timeout)\n self.autoscaler.stop(wait=wait, timeout=timeout)\n super().stop(wait=wait, timeout=timeout)", "def evalOnSubTreeEnd(self, node):\n\n return None", "def spinDown(self):\n\n self.emr_client.terminate_job_flows(JobFlowIds=[self.job_flow_id])\n # don't forget to tip the waiter :)\n spinDown_waiter = self.emr_client.get_waiter('cluster_terminated')\n try:\n spinDown_waiter.wait(ClusterId=self.clusID)\n\n except WaiterError as e:\n if 'Max attempts exceeded' in e.message:\n print('EMR Step did not complete 
in 30 minutes')\n else:\n print(e.message)", "def async_infer(self, frame, req_id):\n\n in_frame, scale_h, scale_w = self._prepare_frame(frame)\n self.last_scales = scale_h, scale_w\n\n super().async_infer(in_frame, req_id)", "def request_shutdown(self, kernel_id, restart=False):", "def destroy_node(self):\n driver = self.driver\n driver.ex_detach_floating_ip_from_node(self.node, self.floating_ip)\n driver.destroy_node(self.node)\n sleep(15)\n for volume in self.volumes:\n driver.destroy_volume(volume)", "def do_node_tear_down(self, context, node_id):\n self.cast(context,\n self.make_msg('do_node_tear_down',\n node_id=node_id))", "def wait_for_termination(self):\n self.server.wait_for_termination()", "def clear_node():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"metric\", \"clear\")\n else:\n cmd = _traffic_line(\"-c\")\n\n return _subprocess(cmd)", "def _bootup_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.popleft()\n self.__bootup_service(conn, compose_fname, container_name)", "def converge(log, transaction_id, config, scaling_group, state, launch_config,\n policy, config_value=config_value):\n if tenant_is_enabled(scaling_group.tenant_id, config_value):\n # For convergence tenants, find delta based on group's desired\n # capacity\n delta = apply_delta(log, state.desired, state, config, policy)\n if delta == 0:\n # No change in servers. Return None synchronously\n return None\n else:\n return defer.succeed(state)\n\n # For non-convergence tenants, the value used for desired-capacity is\n # the sum of active+pending, which is 0, so the delta ends up being\n # the min entities due to constraint calculation.\n delta = calculate_delta(log, state, config, policy)\n execute_log = log.bind(server_delta=delta)\n\n if delta == 0:\n execute_log.msg(\"no change in servers\")\n return None\n elif delta > 0:\n execute_log.msg(\"executing launch configs\")\n deferred = execute_launch_config(\n execute_log, transaction_id, state, launch_config,\n scaling_group, delta)\n else:\n # delta < 0 (scale down)\n execute_log.msg(\"scaling down\")\n deferred = exec_scale_down(execute_log, transaction_id, state,\n scaling_group, -delta)\n\n deferred.addCallback(_do_convergence_audit_log, log, delta, state)\n return deferred", "def power_off_node(self, node):\n msg = 'Node {0} has not become offline after hard shutdown'.format(\n node.name)\n logger.info('Power off node %s', node.name)\n node.destroy()\n logger.info('Wait a %s node offline status', node.name)\n helpers.wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(\n node)['online'], timeout=60 * 5, timeout_msg=msg)", "def resize_for_new_nodes(new_total_nodes, k8s, cluster, test=False):\n if confirm((\"Resizing up to: %d nodes\" % new_total_nodes)):\n scale_logger.info(\"Resizing up to: %d nodes\", new_total_nodes)\n if not test:\n cluster.add_new_node(new_total_nodes)\n wait_time = 130\n scale_logger.debug(\n \"Sleeping for %i seconds for the node to be ready for populating\", wait_time)\n time.sleep(wait_time)\n populate(k8s)", "def stop_slave_worker():\n print(\"Stopping slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True", "def scale_down_options(self) -> 
Iterable[ClusterNodeMetadata]:\n raise NotImplementedError()", "def rescale(self):\n # forecast on real data, don't need this anymore\n pass", "def _shutdown(self):", "def terminate(self) -> asyncio.Future[None]:\n self._check_closed()\n fut: Future[None] = Future()\n self._watch_q.put_nowait({\"op\": \"terminate\", \"future\": fut})\n return fut", "def stop_slave_web():\n print(\"Stopping slave web\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/web\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the web dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the web process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True", "def timeout_scalefactor(self, initial_factor):\n result = initial_factor\n for key, value in SCALE_FACTOR.items():\n try:\n if getattr(self, key):\n result *= value\n except AttributeError:\n raise Exception(INITIALIZATION_ERROR % key)\n if self.arch in SLOW_ARCHS:\n result *= 4.5\n return result", "def scale_down(image:np.array)->np.array:\n src = image\n scale_percent = 25\n width = int(src.shape[1] * scale_percent / 100)\n height = int(src.shape[0] * scale_percent / 100)\n dsize = (width, height)\n output = cv2.resize(src, dsize)\n return output", "def resizeLXCContainer(self,node,vmid,post_data):\n data = self.connect('put',\"nodes/%s/lxc/%s/resize\" % (node,vmid), post_data)\n return data", "def test_create_node_shutdown_item(self):\n pass", "def StopRequestHook(ref, args, request):\n del ref\n del args\n stop_request = GetMessagesModule().StopNodeRequest()\n request.stopNodeRequest = stop_request\n return request", "def delete_on_node(\n node_ip: str, path: str, return_future: bool = False\n) -> Union[bool, ray.ObjectRef]:\n\n node_id = _get_node_id_from_node_ip(node_ip)\n\n delete_task = _remote_delete_path.options(num_cpus=0, **_force_on_node(node_id))\n future = delete_task.remote(path)\n\n if return_future:\n return future\n\n return ray.get(future)", "def scale_root(self) -> int:\r\n ...", "def shutdown_cluster(self):\n self.cluster.shutdown()", "def finish_task(self):\n self.report_total_usage()\n if self.retry:\n self.retry = False\n self.curr_retries = 0\n self.state = \"done\"\n self.ready_for_step += 1\n self.RM.release_allocation(self, self.using.nodes)\n self.using.clear()\n self.curr_exec_time = 0\n # log message\n self.fwk.logEvent(self.sim.name, self.name, \"finish_task\", \"finished running\")", "def cluster_shutdown():\n map(shutdown, cluster)", "def terminate():\n with open (f\"{CLUSTER_FOLDER}/uuid\", \"r\") as f:\n uuid = f.read().strip()\n\n start_time = time.time() \n cluster = delete_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], uuid) \n if(not cluster):\n log(\"Failed to terminate cluster via API.\")\n exit(1)\n\n log(f\"Started termination of cluster '{cluster['id']}'. 
Waiting for cluster to be terminated...\")\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']):\n time.sleep(5)\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n\n if(cluster['status']['failed']):\n log(\"Cluster termination failed.\")\n exit(1)\n\n if(TIMEOUT_SECONDS <= (time.time()-start_time)):\n log(\"Timeout while launching cluster.\")\n exit(1)\n\n log(f\"Cluster '{cluster['id']}' is terminated.\")", "def shutdown(self, timeout=30.0):\n self._shutting_down = True\n self._shutdown_event.set(True)\n unset = shutdown_or_die(timeout) # Failsafe in case the following doesn't work\n elapsed = self.join_children(timeout)\n #self.stop()\n\n unset()\n return elapsed", "def _termination_handler(self, signum, frame):\n print '[i] Shutting down...'\n self.sensor.close()\n sys.exit(1)", "async def finalize(self):\n self._req_event_emitter.disconnect()\n await self._task", "async def _handle_ha_stop(self, _):\n self._unsub_stop = None\n await self.shutdown()", "async def async_resize(self, tagOrId, scale, x, y, time, fps=24, update=True):\n scale *= -1\n timeIncrement, moveIncrement = 1 / fps, scale / time / fps\n\n counter = 0\n while time * fps > counter * timeIncrement * fps:\n counter += 1\n\n self.resize(tagOrId, moveIncrement, x, y)\n\n if update:\n self.tk.call(\"update\")\n await asyncio.sleep(timeIncrement)", "def terminate_job_run(\n self,\n ) -> Callable[\n [cloud_deploy.TerminateJobRunRequest], cloud_deploy.TerminateJobRunResponse\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"terminate_job_run\" not in self._stubs:\n self._stubs[\"terminate_job_run\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/TerminateJobRun\",\n request_serializer=cloud_deploy.TerminateJobRunRequest.serialize,\n response_deserializer=cloud_deploy.TerminateJobRunResponse.deserialize,\n )\n return self._stubs[\"terminate_job_run\"]", "def scale_datanode(self, datanode_count):\n call([Command.docker_compose, \"-f\", self.docker_compose_file,\n \"up\", \"-d\", \"--scale\", \"datanode=\" + datanode_count])", "def test_node_graceful_shutdown(self, proc_info, controller_node):\n launch_testing.asserts.assertExitCodes(proc_info, process=controller_node)", "def shutdown():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><shutdown><system></system></shutdown></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def replace_shutdown_node_test(self):\n self._replace_node_test(gently=True)", "def finish():\n if not channel.closed():\n channel.close()\n loop.remove_timeout(timeout)\n kernel.remove_restart_callback(on_restart_failed, 'dead')", "async def _create_upscale(input_file, *, output_file, ratio):\n # If the output file already exists, use it if the timestamp matches. If the\n # timestamps are different, regenerate it.\n if output_file.exists():\n input_stat = input_file.stat()\n output_stat = output_file.stat()\n if input_stat.st_mtime == output_stat.st_mtime:\n return\n\n # Reencode the image we're upscaling to an RGB BMP. The upscaler isn't very robust\n # at handling various files and file paths, so this lets us give it a simple, controlled\n # input that won't confuse it. 
Bake any transparency (it doesn't handle transparency)\n # and convert to RGB.\n input_temp_file = misc.get_temporary_path('.bmp')\n output_temp_file = misc.get_temporary_path('.jpg')\n\n with input_file.open('rb') as f:\n f = remove_photoshop_tiff_data(f)\n image = Image.open(f)\n image.load()\n \n # If we tell PIL to convert P to RGB and the palette has transparency, it seems\n # to use an undefined RGB color for transparency. We just want it to comp onto\n # black. To avoid this, convert to RGBA, so it'll use the black underlay path\n # below.\n if image.mode == 'P':\n image = image.convert('RGBA')\n\n # Bake RGBA to RGB with a black background.\n if image.mode == 'RGBA':\n bg = Image.new('RGBA', image.size, (0,0,0,255))\n image = Image.alpha_composite(bg, image)\n\n # Convert anything else to RGB.\n if image.mode != 'RGB':\n image = image.convert('RGB')\n\n with input_temp_file.open('w+b') as output:\n image.save(output, format='bmp')\n\n try:\n # This is a GPU upscaler. Only process one image at a time, so we don't spam GPU jobs\n # if the client tries to load too aggressively. Doing it here allows the above check\n # to complete without blocking if the image is already cached.\n async with _lock:\n assert ratio in (2,3,4)\n result = await _run_upscale([\n _upscaler,\n '-s', str(ratio),\n '-i', str(input_temp_file),\n '-o', str(output_temp_file),\n ])\n\n if result != 0:\n raise Exception('Error upscaling image')\n\n # The upscaler doesn't return a result code.\n if not output_temp_file.exists():\n raise Exception('Error upscaling image (no file generated)')\n\n shutil.copyfile(output_temp_file, output_file)\n finally:\n # Clean up.\n input_temp_file.unlink(missing_ok=True)\n output_temp_file.unlink(missing_ok=True)\n\n # Set the cache file's timestamp to match the input file, so we can tell if the input\n # file changes.\n st = input_file.stat()\n os.utime(output_file, (st.st_atime, st.st_mtime))", "def terminate(self):", "def cluster_release(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.release_cluster(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster release failed\")", "def shutdown(self):\n print(\"DEPRECATED shutdown\")\n return self._operations.shutdown()", "def down(self, connection):\n raise NotImplementedError" ]
[ "0.5773211", "0.5584699", "0.5190178", "0.51015365", "0.49526966", "0.4843822", "0.47758818", "0.47302267", "0.47145423", "0.47145423", "0.46995267", "0.46891746", "0.4611184", "0.45706004", "0.45680726", "0.4559255", "0.4542594", "0.45021996", "0.44928265", "0.4487056", "0.4486319", "0.4485596", "0.44809014", "0.44680598", "0.4465019", "0.44606575", "0.4451243", "0.44430953", "0.44369414", "0.4431504", "0.44271618", "0.44094962", "0.43927935", "0.4392562", "0.43886918", "0.4360579", "0.4356437", "0.434953", "0.43424246", "0.4339953", "0.43070233", "0.42983693", "0.428914", "0.4284183", "0.42755246", "0.42746255", "0.42746255", "0.42746255", "0.4266683", "0.4264745", "0.42615625", "0.4258445", "0.42569903", "0.42558748", "0.42546323", "0.4251844", "0.425181", "0.42455348", "0.42308763", "0.4227342", "0.4226394", "0.4223333", "0.42088106", "0.42055216", "0.4204639", "0.42020512", "0.4199185", "0.41955313", "0.4192904", "0.41876724", "0.4173969", "0.41732875", "0.4170811", "0.41703737", "0.4167731", "0.41673142", "0.41660026", "0.4164206", "0.41600803", "0.41515428", "0.41471303", "0.414357", "0.4136239", "0.4134819", "0.41278732", "0.41235706", "0.4117501", "0.4114091", "0.41097382", "0.4106858", "0.41030702", "0.4102082", "0.40946195", "0.40871787", "0.4084348", "0.40838188", "0.40808952", "0.40719196", "0.40695578", "0.4067661" ]
0.45100704
17
A custom Python read function for interfacing with nii image files.
def read_fn(file_references, mode, params=None): def _augment(img): """An image augmentation function""" return flip(img, axis=2) for f in file_references: subject_id = f[0] data_path = '../../../data/IXI_HH/1mm' # Read the image nii with sitk t1_fn = os.path.join(data_path, '{}/T1_1mm.nii.gz'.format(subject_id)) t1 = sitk.GetArrayFromImage(sitk.ReadImage(str(t1_fn))) # Normalise volume images t1 = whitening(t1) # Create a 4D image (i.e. [x, y, z, channels]) images = np.expand_dims(t1, axis=-1).astype(np.float32) if mode == tf.estimator.ModeKeys.PREDICT: yield {'features': {'x': images}} # Augment if used in training mode if mode == tf.estimator.ModeKeys.TRAIN: images = _augment(images) # Check if the reader is supposed to return training examples or full # images if params['extract_examples']: images = extract_random_example_array( image_list=images, example_size=params['example_size'], n_examples=params['n_examples']) for e in range(params['n_examples']): yield {'features': {'x': images[e].astype(np.float32)}} else: yield {'features': {'x': images}} return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_image(path, file_format='nii.gz'):\n path = path + '.' + file_format\n if file_format == 'npy':\n image = np.load(path)\n elif file_format == 'npz':\n image = np.load(path)['arr_0']\n elif file_format in ('png', 'jpg'):\n image = np.array(imageio.imread(path))\n elif file_format == 'dcm':\n image = np.array(imageio.volread(path, 'DICOM'))\n elif file_format in ('nii', 'nii.gz'):\n image = nib.load(path).get_data()\n else:\n raise ValueError('invalid --input_type : {}'.format(file_format))\n\n return image", "def iread(filename, *args, verbose=True, **kwargs):\n\n # determine if file is valid:\n # assert isinstance(filename, str), 'filename must be a string'\n\n\n # TODO read options for image\n # opt = {\n # 'uint8': False,\n # 'single': False,\n # 'double': False,\n # 'grey': False,\n # 'grey_709': False,\n # 'gamma': 'sRGB',\n # 'reduce': 1.0,\n # 'roi': None\n # }\n\n if isinstance(filename, str) and (filename.startswith(\"http://\") or filename.startswith(\"https://\")):\n # reading from a URL\n\n resp = urllib.request.urlopen(filename)\n array = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv.imdecode(array, -1)\n print(image.shape)\n return (image, filename)\n\n elif isinstance(filename, (str, Path)):\n # reading from a file\n\n path = Path(filename).expanduser()\n\n if any([c in \"?*\" for c in str(path)]):\n # contains wildcard characters, glob it\n # recurse and return a list\n # https://stackoverflow.com/questions/51108256/how-to-take-a-pathname-string-with-wildcards-and-resolve-the-glob-with-pathlib\n \n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n\n if len(pathlist) == 0 and not path.is_absolute():\n # look in the toolbox image folder\n path = Path(__file__).parent / \"images\" / path\n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n \n if len(pathlist) == 0:\n raise ValueError(\"can't expand wildcard\")\n\n imlist = []\n pathlist.sort()\n for p in pathlist:\n imlist.append(iread(p, **kwargs))\n return imlist\n\n else:\n # read single file\n\n if not path.exists():\n if path.is_absolute():\n raise ValueError(f\"file {filename} does not exist\")\n # file doesn't exist\n # see if it matches the supplied images\n path = Path(__file__).parent / \"images\" / path\n\n if not path.exists():\n raise ValueError(f\"file {filename} does not exist, and not found in supplied images\")\n\n # read the image\n # TODO not sure the following will work on Windows\n im = cv.imread(path.as_posix(), **kwargs) # default read-in as BGR\n\n if im is None:\n # TODO check ValueError\n raise ValueError(f\"Could not read {filename}\")\n\n return (im, str(path))\n\n elif islistof(filename, (str, Path)):\n # list of filenames or URLs\n # assume none of these are wildcards, TODO should check\n out = []\n for file in filename:\n out.append(iread(file, *args))\n return out\n else:\n raise ValueError(filename, 'invalid filename')", "def read_image(path):\n img = ndimage.imread(path, mode=\"RGB\") \n return img", "def load_nii(img_path):\n nimg = nib.load(img_path)\n return nimg.get_data(), nimg.affine, nimg.header", "def test_read_namespaced_image_stream(self):\n pass", "def load_nifti(fname, reorient=True):\n img = nib.load(fname)\n if reorient:\n img = nib.as_closest_canonical(img)\n return(img.get_data())", "def read_nifti_file(filepath):\n # Read file\n scan = nib.load(filepath)\n # Get 
raw data\n scan = scan.get_fdata()\n return scan", "def load_nifti_image(filename):\n img = nib.load(filename)\n data = img.get_data()\n return data", "def test_read_namespaced_image_stream_image(self):\n pass", "def test_read_image(self):\n pass", "def imread_wrapper(file):\n p = Path(file)\n if len(p.suffixes) == 2:\n series = int(Path(p.stem).stem)\n else:\n series = int(p.stem)\n return imread(file, series=series)", "def imread(filename, *args, **kwargs):\r\n try:\r\n netpbm = NetpbmFile(filename)\r\n image = netpbm.asarray()\r\n finally:\r\n netpbm.close()\r\n return image", "def imread(fname):\r\n return skimage.io.imread(fname)", "def read_image(fileame, representation):\n validate_representation(representation)\n\n im = imread(fileame)\n if representation == 1 and is_rgb(im):\n # We should convert from Grayscale to RGB\n im = rgb2gray(im)\n return im.astype(np.float32)\n\n return normlized_image(im)", "def load_nii(img_path):\n nimg = nib.load(img_path)\n return np.asanyarray(nimg.dataobj), nimg.affine, nimg.header", "def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img", "def read_image(image_path, *args, **kwargs):\n # TODO: Implement the method\n image2 = Image.open(image_path)\n image = num.asarray(image2)\n\n return image", "def read_image(self, filePath):\n if filePath.endswith(\".dcm\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.expand_dims(image[0,:,:], -1)\n elif filePath.endswith(\".png\"):\n image = cv2.imread(filePath)\n image = np.array(image, dtype = \"int16\")\n elif filePath.endswith(\".mha\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.transpose(image,(1,2,0))\n return image", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def read_image(img_path):\n img = imageio.imread(uri=img_path)\n return img", "def nircam_image_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='NIRCam Image')\n data.header = hdulist[0].header\n wcs = WCS(hdulist[0].header)\n\n # drop the last axis since the cube will be split\n data.coords = coordinates_from_wcs(wcs.sub(2))\n data.add_component(hdulist[0].data[0], 'Flux')\n data.add_component(hdulist[0].data[1], 'Uncertainty')\n\n return data", "def imgRead(filename: str, representation: int) -> np.ndarray:\r\n if representation==LOAD_GRAY_SCALE:\r\n img = cv2.imread(filename,0)\r\n else:\r\n img = cv2.imread(filename)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n return img.astype('uint8')", "def read(self, index):\n assert type(index) is int\n img = self.db.get_node('/images/img{:04d}'.format(index))\n return np.array(img)", "def test_read(self):\n for line in TESTIMAGES.split(\"\\n\"):\n vals = line.split()\n name = vals[0]\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = marccdimage()\n obj.read(self.fn[name])\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin\")\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax\")\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean\")\n self.assertAlmostEqual(stddev, 
obj.getstddev(), 2, \"getstddev\")\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")", "def read_img(components):\n\n img_buf = open(components[0], 'rb').read()\n\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n\n return components", "def _read_image(self, image_path:str, label:str):\n # Get the full path to the image\n image = \"\"\n if label == \"real\":\n image = os.path.join(self.root, \"real\", image_path)\n else:\n image = os.path.join(self.root, \"fake\", image_path)\n \n # Read the image\n image = cv2.imread(image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Normalize the image\n image = image / 255.0\n\n # Convert the image to floating point to use it as\n # an input to the PyTorch model\n image = image.astype(np.float32)\n\n return image", "def imread(filename):\n filename = process(filename)\n ext = os.path.splitext(filename)[1]\n if ext.lower() == '.pfm':\n return load_pfm(filename)\n elif ext.lower() == '.dng':\n return load_dng(filename)\n else:\n loaded = cv2.imread(filename, flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR)\n if loaded is None:\n raise IOError('Could not read {0}'.format(filename))\n else:\n return loaded", "def read():\n # TODO", "def load_nifti(file_path, dtype=np.float32, incl_header=False, z_factor=None, mask=None):\n \n img = nib.load(file_path)\n struct_arr = img.get_data().astype(dtype)\n \n # replace infinite values with 0\n if np.inf in struct_arr:\n struct_arr[struct_arr == np.inf] = 0.\n \n # replace NaN values with 0 \n if np.isnan(struct_arr).any() == True:\n struct_arr[np.isnan(struct_arr)] = 0.\n \n if mask is not None:\n struct_arr *= mask\n \n if z_factor is not None:\n struct_arr = zoom(struct_arr, z_factor)\n \n if incl_header:\n return struct_arr, img\n else:\n return struct_arr", "def read_image(path):\n img = misc.imread(path)\n return img", "def read_image(filename, representation):\n img = imread(filename)\n img = int2float(img)\n if representation == GS_REP:\n img = rgb2gray(img)\n return img", "def load_image_as_nd_array(image_name):\n if (image_name.endswith(\".nii.gz\") or image_name.endswith(\".nii\") or\n image_name.endswith(\".mha\")):\n image_dict = load_nifty_volume_as_4d_array(image_name)\n elif(image_name.endswith(\".jpg\") or image_name.endswith(\".jpeg\") or\n image_name.endswith(\".tif\") or image_name.endswith(\".png\")):\n image_dict = load_rgb_image_as_3d_array(image_name)\n else:\n raise ValueError(\"unsupported image format\")\n return image_dict", "def read_image(path: str):\n return Image.open(path, mode=\"r\")", "def read_img(img_path): \n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def load(self, file, lazy=True):\n # individual files for each slice\n # we got one file, nice!\n \n if not lazy:\n\n if file in self.imagedict.keys():\n return self.imagedict[file]\n else:\n self.imagedict[file] = self.load(file, True)\n self.imagedict[file] *= 1\n return self.imagedict[file]\n \n else:\n \n ending = splitext(file)[-1].lower()\n if ending in ['.nii', '.hdr', '.nii.gz', '.gz']:\n if self.correct_orientation:\n vol = ni.open_image(file, verbose=False)\n self.affine = vol.get_aligned_transformation(\"RAS\")\n data = vol.aligned_volume\n else:\n f = nib.load(file)\n self.affine = f.affine\n self.pixdim = np.asarray(f.header['pixdim'][1:])\n data = f.get_data()\n return 
data\n # elif ending in ['.nrrd', '.nhdr']:\n # if self.correct_orientation:\n # vol = nr.open_image(file, verbose=False)\n # self.affine = vol.get_aligned_transformation(\"RAS\")\n # f = vol.aligned_volume\n # else:\n # try:\n # f, h = nrrd.read(file)\n # except:\n # print('could not read file {}'.format(file))\n # logging.getLogger('data').error('could not read file {}'.format(file))\n # raise Exception('could not read file {}'.format(file))\n # self.affine = np.eye(4)\n # return f\n # elif ending in ['.dcm']:\n # f = pydicom.dcmread(file).pixel_array\n # return f\n # elif ending in ['.mha', '.mhd']:\n # f = skio.imread(file, plugin='simpleitk')\n # self.affine = np.eye(4)\n # return f\n elif ending in ['.png', '.pgm', '.pnm']:\n data = imread(file)\n if len(data.shape) > 2:\n return np.transpose(data, [2, 0, 1])\n else:\n return data\n return imread(file)\n else:\n raise Exception('{} not known'.format(ending))", "def img_read(name):\n\n img = cv2.imread(name)\n\n return img", "def test_read_namespaced_image_stream_tag(self):\n pass", "def read_image_with_label(dir, file):\n assert type(file) == str, \"File name is not string.\"\n f = os.path.join(dir, file)\n info = file.split(\"_\")\n try:\n label = [int(info[x]) for x in range(1, 3)]\n except:\n print(\"The format of file name is not correct.\")\n else:\n return Image.open(f), label", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def read_images_lenstool(image_file = 'image.all'):\n #FIXME - is it a good idea to keep this one line function?\n x_img = np.loadtxt(image_file, usecols = (1, 2))\n return x_img", "def read_img(img_path):\n img_list=[]\n print('image loading...')\n for _,_,files in os.walk(img_path):\n for f in files:\n if f.find('.dcm')>=0:\n tmp_img=dicom.dcmread(os.path.join(img_path,f))\n tmp_img=tmp_img.pixel_array#[0::2,0::2]\n img_list.append(tmp_img)\n img_data=np.array(img_list)\n print('done')\n return img_data", "def read_image(filename):\n img = Image.open(filename)\n im = np.array(img)\n return im", "def check_niimg(\n niimg,\n ensure_ndim=None,\n atleast_4d=False,\n dtype=None,\n return_iterator=False,\n wildcards=True,\n):\n from ..image import new_img_like # avoid circular imports\n\n niimg = stringify_path(niimg)\n\n if isinstance(niimg, str):\n if wildcards and ni.EXPAND_PATH_WILDCARDS:\n # Ascending sorting + 
expand user path\n filenames = sorted(glob.glob(os.path.expanduser(niimg)))\n\n # processing filenames matching globbing expression\n if len(filenames) >= 1 and glob.has_magic(niimg):\n niimg = filenames # iterable case\n # niimg is an existing filename\n elif [niimg] == filenames:\n niimg = filenames[0]\n # No files found by glob\n elif glob.has_magic(niimg):\n # No files matching the glob expression, warn the user\n message = (\n \"No files matching the entered niimg expression: \"\n \"'%s'.\\n You may have left wildcards usage \"\n \"activated: please set the global constant \"\n \"'nilearn.EXPAND_PATH_WILDCARDS' to False to \"\n \"deactivate this behavior.\"\n ) % niimg\n raise ValueError(message)\n else:\n raise ValueError(f\"File not found: '{niimg}'\")\n elif not os.path.exists(niimg):\n raise ValueError(f\"File not found: '{niimg}'\")\n\n # in case of an iterable\n if hasattr(niimg, \"__iter__\") and not isinstance(niimg, str):\n if return_iterator:\n return _iter_check_niimg(\n niimg, ensure_ndim=ensure_ndim, dtype=dtype\n )\n return concat_niimgs(niimg, ensure_ndim=ensure_ndim, dtype=dtype)\n\n # Otherwise, it should be a filename or a SpatialImage, we load it\n niimg = load_niimg(niimg, dtype=dtype)\n\n if ensure_ndim == 3 and len(niimg.shape) == 4 and niimg.shape[3] == 1:\n # \"squeeze\" the image.\n data = _safe_get_data(niimg)\n affine = niimg.affine\n niimg = new_img_like(niimg, data[:, :, :, 0], affine)\n if atleast_4d and len(niimg.shape) == 3:\n data = _get_data(niimg).view()\n data.shape = data.shape + (1,)\n niimg = new_img_like(niimg, data, niimg.affine)\n\n if ensure_ndim is not None and len(niimg.shape) != ensure_ndim:\n raise DimensionError(len(niimg.shape), ensure_ndim)\n\n if return_iterator:\n return (_index_img(niimg, i) for i in range(niimg.shape[3]))\n\n return niimg", "def read_img(filename, use_flat_32_type, one_channel, flip):\r\n\r\n if one_channel:\r\n img = cv2.imread(filename, -1)\r\n else:\r\n img = cv2.imread(filename)\r\n if img is None:\r\n print('in conv_data_generator.py - read_img function - image is None ; filename=', filename)\r\n return img\r\n if use_flat_32_type & (img is not None):\r\n img = img.astype(np.float32)\r\n if img.shape[:2] == (288, 512):\r\n if flip:\r\n img = cv2.flip(img, 1)\r\n return img\r\n else:\r\n print(\"something is strange here - input does not follow the normal habbit - please check or cvhange the code according to input size\")\r\n return False", "def read_image(filename, representation=1):\n\n # if representation == 1:\n # # converting to gray\n # im = cv2.imread(filename, cv2.IMREAD_ANYDEPTH)\n # im = im / 65535\n # else:\n # if representation == 2:\n # im = cv2.imread(filename, cv2.IMREAD_COLOR)\n # # setting the image's matrix to be between 0 and 1\n # im = im / 65535\n # return im\n\n im = imread(filename)\n if representation == 1:\n # converting to gray\n im = rgb2gray(im) / 255\n else:\n if representation == 2:\n im = im.astype(np.float64)\n # setting the image's matrix to be between 0 and 1\n im = im / 255\n return im", "def read_img(img_path:str) -> object:\n img = cv2.imread(img_path)\n return img", "def imread(fname):\n try:\n fp = open(fname, 'rb')\n im = Image.open(fp)\n except:\n sys.stderr.write('IOException: Invalid input type on '+fname+'\\n')\n sys.exit(1)\n else:\n if im.format not in FILETYPES:\n sys.stderr.write('IOException: Invalid image type\\n')\n sys.exit(1)\n \n fa = np.array(im.convert('F'))\n im = im.convert('RGB')\n wa = np.array(im)\n \n fp.close()\n\n return fa, wa", "def read(self, 
filename): # real signature unknown; restored from __doc__\n pass", "def read(self, filename):\n raise NotImplementedError", "def readImage(self, path, tt=1):\n return cv2.imread( path, tt)", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def imread(path):\n with open(path, 'rb') as f:\n with PIL.Image.open(f) as img:\n return img.convert('RGB')", "def read_image(filename, representation):\n\n color_flag = True #if RGB image\n image = imread(filename)\n\n float_image = image.astype(np.float64)\n\n if not np.all(image <= 1):\n float_image /= NORMALIZE #Normalized to range [0,1]\n\n if len(float_image.shape) != 3 : #Checks if RGB or Grayscale\n color_flag = False\n\n if color_flag and representation == 1 : #Checks if need RGB to Gray\n return skimage.color.rgb2gray(float_image)\n\n # Same coloring already\n return float_image", "def readImg(filename, h1, h2, w1, w2):\n img = cv2.imread(filename, 1)\n # plt.figure()\n # plt.imshow(img)\n img = img[h1:h2, w1:w2]\n return img", "def read_image_file(file_name):\n return torch.from_numpy(np.asarray(Image.open(file_name).convert('L')))", "def imread(filename):\n return np.asarray(Image.open(filename), dtype=np.uint8)[..., :3]", "def load_npy_nii(filename):\n import numpy as np\n import nibabel\n\n if filename_type(filename) == 'nii':\n return nibabel.load(filename)\n\n elif filename_type(filename) == 'npy':\n return np.load(filename)\n\n return None", "def reader(self, idx):\n # Get the path of input image and groundtruth mask.\n input_path, gtmask_path = self.imgs[idx]\n input_img, gt_img = self.loader(input_path, gtmask_path)\n return input_img, gt_img", "def read_image(filename, representation):\n im = imread(filename)\n if representation == GS_REP:\n im = rgb2gray(im)\n im = np.divide(im, MAX_VALUE - 1)\n return im", "def read_image(self, ifd):\n ifd.img_data = np.array([], dtype='uint8')\n strips = ifd.get_strips() # [(strip_offset, strip_byte_count)]\n for strip in strips:\n ifd.img_data = np.append(ifd.img_data, self.tif_file.read(size=strip[1], location=strip[0]))", "def read_image(fs, img_path, mode=\"rb\"):\n f = fs.open(img_path, mode)\n pil_img = Image.open(f)\n img_array = np.asarray(pil_img, dtype=np.uint8)\n f.close()\n return img_array", "def get_input(path):\n img = imread(path)\n return img", "def read_img(img_path):\n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def get_input(self, idx):\r\n img_filename = self.root / \"images\" / self._image_array[idx]\r\n x = Image.open(img_filename)\r\n return x", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, _ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", "def _read_image_from_file(file_name):\n image_file = open(file_name, 'rb')\n image = image_file.read()\n image_file.close()\n return image", "def read_data(reader: UFOReader, filename: str) -> bytes:\n return reader.readImage(filename) # type: ignore", "def test_imibread(self):\n gen = imibread(TEST_MIB)\n arr = next(gen)\n self.assertEqual(arr.shape, (256, 256))\n self.assertEqual(arr.dtype, np.dtype(\">u2\"))", "def read(self, fileobj):\n raise NotImplementedError", "def read_image(filename, representation):\n image = 
scipy.misc.imread(filename)\n if int(representation) == 1:\n image = rgb2gray(image)\n return img_as_float(image)", "def read_image(images_root):\n im_array = np.load(images_root)\n return im_array", "def _read_i2c(fd, n):\n if n == 0:\n return b''\n buf = os.read(fd, n)\n if len(buf) != n:\n raise OSError(errno.EIO, os.strerror(errno.EIO))\n return buf", "def Read(image_path):\n # use cv2.imread() to read an images.\n # syntax : cv2.imread(filename, flag=None)\n return cv2.imread(image_path, 0)", "def test_read(self):\n for line in TESTIMAGES.split('\\n'):\n vals = line.strip().split()\n name = vals[0]\n logger.debug(\"Testing file %s\" % name)\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = raxisimage()\n obj.read(os.path.join(os.path.dirname(self.mar), name))\n\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin [%s,%s]\" % (mini, obj.getmin()))\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax [%s,%s]\" % (maxi, obj.getmax()))\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean [%s,%s]\" % (mean, obj.getmean()))\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev [%s,%s]\" % (stddev, obj.getstddev()))\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")\n self.assertNotEqual(obj.dim1, obj.dim2, \"dim2!=dim1\")", "def read(self):", "def read_img(img_id, train_or_test, size):\n img = image.load_img(join(data_dir, train_or_test, img_id + '.jpg'), target_size=size)\n # img = image.img_to_array(img)\n return img", "def read(filename):\r\n with open(filename, \"rb\") as f:\r\n data = pic.load(f)\r\n return data", "def read_image(path):\n reader = sitk.ImageSeriesReader()\n dicom_filenames = reader.GetGDCMSeriesFileNames(path)\n reader.SetFileNames(dicom_filenames)\n reader.LoadPrivateTagsOn()\n img = reader.Execute()\n img.SetOrigin((0, 0, 0))\n return img", "def read_image(filename, representation):\n image = imread(filename)\n\n # Convert image to grayscale, if required.\n if representation == GRAY_OUT and image.ndim == RGB_DIM:\n image = rgb2gray(image)\n else:\n # Normalize image to [0, 1].\n image = image.astype(np.float64)\n image /= TO_FRACTION\n return image", "def load_nii_in_ras(fname):\n nii = nib.load(fname)\n nii = nib.as_closest_canonical(nii)\n vol = nii.get_fdata()\n\n return vol, nii.affine", "def read_img(filename) -> Tuple[np.ndarray, np.ndarray]:\n img = cv2.imread(filename, 3)\n labimg = cv2.cvtColor(cv2.resize(img, (config.IMAGE_SIZE, config.IMAGE_SIZE)), cv2.COLOR_BGR2Lab)\n return np.reshape(labimg[:, :, 0], (config.IMAGE_SIZE, config.IMAGE_SIZE, 1)), labimg[:, :, 1:]", "def read(ios):\n assert(isinstance(ios, io.IOBase))\n return Reader(ios).read()", "def read(self, *, index=None, **kwargs):\n\n if index is None:\n index = _legacy_default_index(self._format)\n\n if index is Ellipsis:\n img = np.stack([im for im in self.iter(**kwargs)])\n return img\n\n reader = self.legacy_get_reader(**kwargs)\n return reader.get_data(index)", "def read_image_from_fs(name):\n with open(name, \"rb\") as fin:\n return fin.read()", "def read(infile):\n _, ext = os.path.splitext(infile)\n ext = ext.strip('.')\n return read_funcs[ext](infile)", "def read_image(file_name, representation=GRAY_SCALE):\n im = np.array(imread(file_name))\n img_float = im.astype(np.float32)\n if representation == 1: # return grayscale image\n if len(im.shape) == TWO_DIM: # image was given in grayscale\n return img_float\n elif len(im.shape) == THREE_DIM: # image is rgb, convert to 
grayscale\n return rgb2gray(img_float)\n elif representation == 2: # return rgb\n return img_float", "def get_raw_img(image_name):\n # IMREAD_COLOR ignores transparency (!)\n return cv2.imread(image_name, cv2.IMREAD_COLOR)", "def read_mhd_and_raw(path, numpyFlag=True):\n img = sitk.ReadImage(path)\n if not numpyFlag:\n return img\n\n nda = sitk.GetArrayFromImage(img) # (img(x,y,z)->numpyArray(z,y,x))\n return nda", "def imReadAndConvert(filename: str, representation: int) -> np.ndarray:\r\n return normalize(imgRead(filename,representation)).astype(np.float)", "def read_image(img_path):\r\n got_img = False\r\n while not got_img:\r\n try:\r\n img = Image.open(img_path).convert('RGB')\r\n got_img = True\r\n except IOError:\r\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\r\n pass\r\n return img", "def read_img(img_id, data_dir, train_or_test, size):\n img = image.load_img(os.path.join(data_dir, train_or_test, '%s.jpg' % img_id), target_size=size)\n img = image.img_to_array(img)\n return img", "def __readImages(self, filename):\n print 'Reading images from %s ...' % filename\n images = []\n with open(filename, 'rb') as infile:\n infile.read(4) # ignore magic number\n count = struct.unpack('>i', infile.read(4))[0]\n rows = struct.unpack('>i', infile.read(4))[0]\n columns = struct.unpack('>i', infile.read(4))[0]\n\n for i in xrange(count):\n data = infile.read(rows*columns)\n image = np.fromstring(data, dtype=np.uint8)\n image = image.reshape((rows, columns))\n image = 255 - image # now black digit on white background\n images.append(image)\n return images", "def read_image(filename):\n try:\n fi = open(filename,\"r\")\n lines = fi.readlines()\n n = int(lines[0]);\n img = create_zeroed_image(n)\n for i,line in enumerate(lines[1:]):\n clean_line = line.strip() # remove whitespace and newlines\n for j,char in enumerate(clean_line):\n # your code here\n \n img[i][j]=char\n # end your code here\n return img\n except IOError:\n raise Exception(\"Cannot find file \" + filename);\n finally:\n fi.close()", "def _index_img(img_file, index):\n imgs = check_niimg(img_file, ensure_ndim=4, atleast_4d=True)\n return _index_img(imgs, index)", "def read(path):", "def binary_read(filenames, gulp_size, gulp_nframe, dtype, *args, **kwargs):\n return BinaryFileReadBlock(filenames, gulp_size, gulp_nframe, dtype, *args, **kwargs)", "def read_img(path):\n img = Image.open(path)\n img_arr = np.array(img, dtype='int32')\n img.close()\n return img_arr", "def read_image(fname, roi=None, dset_name='default', parallelism=1):\n\n from functools import partial\n from numpy import array, ndarray\n from multiprocessing import Pool, cpu_count\n\n if isinstance(fname, str):\n fmt = fname.split('.')[-1]\n \n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n \n result = reader(fname)\n\n elif isinstance(fname, (tuple, list, ndarray)):\n fmt = fname[0].split('.')[-1]\n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n\n if parallelism == 1:\n result = array([reader(f) for f in fname])\n\n else:\n if parallelism == -1:\n num_cores = cpu_count()\n else:\n num_cores = min(parallelism, cpu_count())\n\n with Pool(num_cores) as pool:\n result = array(pool.map(reader, fname))\n else:\n raise TypeError(\n \"First argument must be string for a one file or (tuple, list, ndarray) for many 
files\"\n )\n\n return result", "def read_image(img):\n out = Image.open(img)\n return Technicolor(out)", "def read_vanhateren_images (n_imgs=5):\n folder_name = r'D:\\VanHateren\\vanhateren_imc' # change this to point to the directory which holds the van hateren data\n # files = listdir(folder_name)\n onlyfiles = [ f for f in listdir(folder_name) if isfile(join(folder_name,f)) ]\n imgs = []\n for i in range(n_imgs):\n filename = join(folder_name, onlyfiles[i])\n with open(filename, 'rb') as handle:\n s = handle.read()\n arr = array.array('H', s)\n arr.byteswap()\n img_i = np.array(arr, dtype='uint16').reshape(1024, 1536)\n imgs.append(img_i) \n return imgs\n #pylab.imshow(img)\n #pylab.show()" ]
[ "0.7021543", "0.69873", "0.6858336", "0.6605103", "0.65766186", "0.65725684", "0.6543384", "0.65361625", "0.6512165", "0.6454746", "0.6408091", "0.6406229", "0.63912904", "0.6366565", "0.63648134", "0.6295551", "0.62764174", "0.62492245", "0.61694163", "0.61654574", "0.6146891", "0.6132239", "0.61298186", "0.6127052", "0.607849", "0.6062225", "0.60458213", "0.6042526", "0.6040394", "0.6022941", "0.60021317", "0.5999303", "0.598013", "0.59667873", "0.5958743", "0.595614", "0.5953873", "0.59479177", "0.5929743", "0.59243363", "0.5914315", "0.5912997", "0.59000397", "0.58678865", "0.58636284", "0.5856133", "0.5855526", "0.5841447", "0.5836708", "0.5833537", "0.5816635", "0.5816635", "0.581581", "0.5809447", "0.5807782", "0.5805275", "0.5788319", "0.5781626", "0.57758373", "0.57687044", "0.57536876", "0.57499164", "0.5732929", "0.57297295", "0.57121307", "0.5704198", "0.570288", "0.57022053", "0.57012254", "0.56998867", "0.56935495", "0.56902725", "0.5678182", "0.56714094", "0.56611866", "0.5658874", "0.56544715", "0.56536335", "0.5633447", "0.5630646", "0.5629564", "0.5627161", "0.5624909", "0.56213313", "0.56191224", "0.56185186", "0.56146026", "0.56135404", "0.5610417", "0.56076247", "0.5594366", "0.5576966", "0.55751354", "0.55717367", "0.55715686", "0.5570433", "0.55666834", "0.5566009", "0.55631655", "0.55618095", "0.5561039" ]
0.0
-1
An image augmentation function
def _augment(img): return flip(img, axis=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def augment(self, image):\n pass", "def image_augmentation(img):\n return np.fliplr(img)", "def get_augmenter():\n\n augmenter = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Small gaussian blur with random sigma between 0 and 0.5.\n # But we only blur about 50% of all images.\n iaa.Sometimes(\n 0.5,\n iaa.GaussianBlur(sigma=(0, 0.5))\n ),\n # Strengthen or weaken the contrast in each image.\n iaa.LinearContrast((0.75, 1.5)),\n # Add gaussian noise.\n # For 50% of all images, we sample the noise once per pixel.\n # For the other 50% of all images, we sample the noise per pixel AND\n # channel. This can change the color (not only brightness) of the\n # pixels.\n iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),\n # Make some images brighter and some darker.\n # In 20% of all cases, we sample the multiplier once per channel,\n # which can end up changing the color of the images.\n iaa.Multiply((0.8, 1.2), per_channel=0.2),\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, rotate them and shear them.\n iaa.Affine(\n scale={\"x\": (0.80, 1.2), \"y\": (0.80, 1.2)},\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n rotate=(-25, 25),\n shear=(-6, 6)\n )\n], random_order=True) # apply augmenters in random order\n\n return augmenter", "def _augment(img):\r\n return flip(img, axis=2)", "def get_augmentation_sequence():\n # Macro to apply something with 50% chance\n sometimes = lambda aug: iaa.Sometimes(0.5, aug) # 50%\n rarely = lambda aug: iaa.Sometimes(0.1, aug) # 10%\n\n # Augmentation applied to every image\n # Augmentors sampled one value per channel\n aug_sequence = iaa.Sequential(\n [\n # apply the following augmenters to most images\n iaa.Fliplr(0.5), # horizontally flip 50% of all images\n iaa.Flipud(0.5), # vertically flip 50% of all images\n\n # crop images by -0.25% to 0.25% of their height/width\n # positive values crop the image, negative pad\n sometimes(iaa.CropAndPad(\n percent=(-0.25, 0.25),\n pad_mode=['constant', 'edge'], # pad with constant value of the edge value\n pad_cval=(0, 0) # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n )),\n sometimes(iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)\n rotate=(-45, 45), # rotate by -45 to +45 degrees\n shear=(-16, 16), # shear by -16 to +16 degrees\n order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)\n cval=(0, 0), # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n mode='constant' # ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)\n )),\n # rarely(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),\n iaa.GaussianBlur((0, 3.0)),\n iaa.Add((-10, 10), per_channel=0.7), # change brightness of images (by -10 to 10 of original value)\n iaa.AddToHueAndSaturation((-20, 20)),\n # sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))\n ],\n random_order=True\n )\n\n return aug_sequence", "def data_augmentation(image, aug):\n if (aug == \"random_crop\") and (random.randint(0,1)):\n image = random_crop(image) \n if (aug == \"random_rotation\") and (random.randint(0,1)): \n image = random_rotation(image) \n if (aug == \"random_flip\") and (random.randint(0,1)): \n image = 
random_flip(image)\n if (aug == \"affine_transformation\") and (random.randint(0,1)): \n image = affine_transformation(image)\n if (aug == \"random_gaussian_noise\") and (random.randint(0,1)): \n image = random_gaussian_noise(image)\n if (aug == \"random_erasing\") and (random.randint(0,1)): \n image = random_erasing(image) \n return image", "def img_augmentation(augmentation, img, bbox):\n\n # img_copy = img.copy()\n image_shape = img.shape\n h, w = image_shape[0:2]\n\n # Convert the stochastic sequence of augmenters to a deterministic one.\n # The deterministic sequence will always apply the exactly same effects to the images.\n det = augmentation.to_deterministic()\n img_aug = det.augment_image(img)\n\n ia_bbox = list()\n for bounding_box in bbox:\n x1, y1, x2, y2 = bounding_box\n ia_bbox.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2))\n\n bbs = ia.BoundingBoxesOnImage(ia_bbox, shape=image_shape)\n bbs_aug = det.augment_bounding_boxes([bbs])[0]\n # img = bbs_aug.draw_on_image(img)\n\n after_bbox = list()\n for bounding_box in bbs_aug.bounding_boxes:\n bbox_list = [bounding_box.x1_int, bounding_box.y1_int, bounding_box.x2_int, bounding_box.y2_int]\n\n if bbox_list[0] >= w: bbox_list[0] = w - 1\n if bbox_list[1] >= h: bbox_list[1] = h - 1\n if bbox_list[2] >= w: bbox_list[2] = w - 1\n if bbox_list[3] >= h: bbox_list[3] = h - 1\n\n if bbox_list[0] == bbox_list[2] or bbox_list[1] == bbox_list[3]:\n return img_augmentation(augmentation, img, bbox)\n\n bbox_list = list(map(lambda x: max(x, 0), bbox_list))\n after_bbox.append(bbox_list)\n\n assert img_aug.shape == image_shape, \"Augmentation shouldn't change image size\"\n\n return img_aug, after_bbox", "def _apply_augment(self, img: Image, name: str, level: int) -> Image:\n assert 0 <= level < self.n_level\n augment_fn, low, high = self.transforms_info[name]\n return augment_fn(img.copy(), level * (high - low) / self.n_level + low)", "def img_and_mask_augmentation(augmentation, img, mask):\n\n # img_copy = img.copy()\n image_shape = img.shape\n\n # Convert the stochastic sequence of augmenters to a deterministic one.\n # The deterministic sequence will always apply the exactly same effects to the images.\n det = augmentation.to_deterministic()\n img_aug = det.augment_image(img)\n mask_aug = det.augment_image(mask, hooks=imgaug.HooksImages(activator=hook))\n mask_aug = mask_aug.astype(np.bool)\n\n assert img_aug.shape == image_shape, \"Augmentation shouldn't change image size\"\n\n return img_aug, mask_aug", "def augmentation_simple(filename, aug_type, max_H, max_W, folder=CONFIG.data_folder):\r\n\r\n image = rgb2grey(mpimg.imread(os.path.join(folder, filename)))\r\n image_augmented = np.ones(shape=(max_H, max_W))\r\n (h, w) = np.shape(image)\r\n stride_0, stride_1 = max_H - h, (max_W - w) // 2\r\n offset = ((aug_type % 2) * stride_0, (aug_type % 3) * stride_1)\r\n image_augmented[offset[0]: h + offset[0], offset[1]: w + offset[1]] = image\r\n\r\n return image_augmented", "def shift_augmentation():\n shift = np.random.randint(-200, 201, size=2)\n return lambda image: shift_with_extension(image, shift)", "def rotate_augmentation():\n rand_rotate = np.random.randint(180)\n return lambda image: rotate_with_extension(image, rand_rotate)", "def augment_img(img):\n img = random_hflip_img(img)\n img = cutout_img(img, size=12)\n img = zero_pad_and_crop_img(img)\n return img", "def augment():\n print(\"augmenting......\")\n path1 = '../trainp1/'\n path2 = '../trainp2/'\n # path of pair1 and pair2 similar to img & mask task for segmentation\n p = 
Augmentor.Pipeline(path1) # pair1\n p.ground_truth(path2) # pair2\n p.rotate(probability=0.3, max_left_rotation=3, max_right_rotation=3) \n p.flip_left_right(probability=0.2) \n p.random_distortion(0.5, 2, 2, 2)\n p.zoom(probability=0.5, min_factor=0.95, max_factor=1.05)\n p.process()", "def augmentation_nine(filename, aug_type, max_H, max_W, folder=CONFIG.data_folder):\r\n\r\n # image = rgb2grey(mpimg.imread(os.path.join(folder, filename)))\r\n\r\n # rotating a 214 pixel image by 2 deg yield 8 more pixels\r\n image_augmented = np.ones(shape=(max_H, max_W))\r\n image = Image.open(os.path.join(folder, filename))\r\n image = image.convert('RGB')\r\n # note that Image read rgb imgs as 0-255\r\n #################################\r\n # aug_type = 8\r\n\r\n w_ori, h_ori = image.size\r\n\r\n rotate_ind = aug_type % 3\r\n scale_ind = aug_type // 3\r\n\r\n image = PIL.ImageOps.invert(image)\r\n if rotate_ind == 1:\r\n image = image.rotate(2, expand=True)\r\n elif rotate_ind == 2:\r\n image = image.rotate(-2, expand=True)\r\n image = PIL.ImageOps.invert(image)\r\n\r\n h, w = image.size\r\n\r\n if scale_ind == 1:\r\n h, w = np.int(np.floor(h * 0.98)), np.int(np.floor(w * 0.98))\r\n image = image.resize((h, w))\r\n elif scale_ind == 2:\r\n h, w = np.int(np.floor(h * 0.96)), np.int(np.floor(w * 0.96))\r\n image = image.resize((h, w))\r\n\r\n # put image there. 9 images in total. this enhalts shifting.\r\n # scale to (0-1)\r\n image = rgb2grey(np.array(image) / 255)\r\n\r\n h, w = np.shape(image)\r\n\r\n stride_0, stride_1 = (max_H - 10 - h_ori) // 2, (max_W - 10 - w_ori) // 2\r\n offset = ((aug_type % 3) * stride_0, (aug_type % 3) * stride_1)\r\n try:\r\n image_augmented[offset[0]: h + offset[0], offset[1]: w + offset[1]] = image\r\n except ValueError:\r\n print(filename)\r\n\r\n return image_augmented", "def perform_augmentations(image, gt_image, augmentations, probabilities):\n for i in range(len(augmentations)):\n if np.random.rand(1) < probabilities[i]:\n image, gt_image = augmentations[i](image, gt_image)\n\n return image, gt_image", "def flip_augmentation():\n return lambda image: ImageOps.flip(image)", "def augment_image(im):\n # First crop out the face to save reduce computation load\n bb = im.landmarks['bb'].lms\n bb_vec = bb.as_vector()\n bb_ul = (np.array([bb_vec[0], bb_vec[1]]) - bb.centre()) * 2\n bb_lr = (np.array([bb_vec[4], bb_vec[5]]) - bb.centre()) * 2\n ul = bb_ul + bb.centre()\n lr = bb_lr + bb.centre()\n im = im.crop(ul, lr, constrain_to_boundary=True)\n if im.pixels.shape[0] == 1:\n pix = np.zeros((3, im.pixels.shape[1], im.pixels.shape[2]))\n pix[:,] = im.pixels\n im.pixels = pix\n\n beta = 0.3\n cx = np.random.uniform(-beta, beta)\n cy = np.random.uniform(-beta, beta)\n fx = 1.0\n fy = np.random.uniform(0.6, 1.4)\n max_rotation = 30\n theta = np.random.uniform(-max_rotation, max_rotation)\n\n rotation = menpo.transform.Rotation.init_from_2d_ccw_angle(theta)\n shear = menpo.transform.Affine(np.array([[1, cx, 0],[cy, 1, 0], [0,0,1]]))\n scale = menpo.transform.Affine(np.array([[fx, 0, 0],[0, fy, 0], [0,0,1]]))\n T = scale.compose_after(shear).compose_after(rotation)\n\n t_im = im.transform_about_centre(T)\n\n t_im = add_color_jetting(t_im)\n t_im = add_occlusion(t_im)\n\n\n new_bb = t_im.landmarks['PTS'].lms.bounding_box()\n\n #new_bb contains the gt bounding box\n augmented_bb = add_bb_noise(new_bb)\n augmented_bb = augmented_bb.reshape((4,2))\n augmented_bb = menpo.shape.PointCloud(augmented_bb)\n t_im.landmarks['bb'] = 
menpo.landmark.LandmarkGroup.init_with_all_label(augmented_bb)\n\n return t_im", "def data_augmentation(image, mode):\n if mode == 0:\n # original\n return image\n elif mode == 1:\n # flip up and down\n return np.flipud(image)\n elif mode == 2:\n # rotate counter-clockwise 90 degree\n return np.rot90(image)\n elif mode == 3:\n # rotate 90 degree and flip up and down\n image = np.rot90(image)\n return np.flipud(image)\n elif mode == 4:\n # rotate 180 degree\n return np.rot90(image, k=2)\n elif mode == 5:\n # rotate 180 degree and flip\n image = np.rot90(image, k=2)\n return np.flipud(image)\n elif mode == 6:\n # rotate 270 degree\n return np.rot90(image, k=3)\n elif mode == 7:\n # rotate 270 degree and flip\n image = np.rot90(image, k=3)\n return np.flipud(image)", "def augmenter(x, y):\n # Note that we only use fliprots along axis=(1,2), i.e. the yx axis\n # as 3D microscopy acquisitions are usually not axially symmetric\n x, y = random_fliprot(x, y, axis=(1, 2))\n x = random_intensity_change(x)\n return x, y", "def set_augmentor():\n config = {'blur': {'values': ('gaussian', 0.7, 1.0), 'prob': 0.3},\n 'brightness': {'values': (0.6, 1.0), 'prob': 0.1},\n 'brightness1': {'values': (1.0, 1.5), 'prob': 0.1},\n 'flip': {'values': ('hor',), 'prob': 0.5},\n 'grid_mask': {'values': (0, 0.2, 0, 0.2, 0.01, 0.1, 0.01, 0.1, 0.1, 0.2, 0.1, 0.2), 'prob': 0.4},\n 'illumination': {'values': ('blob_negative', 0.1, 0.2, 100, 150), 'prob': 0.2},\n 'noise': {'values': (2, 10), 'use_gray_noise': True, 'prob': 1},\n 'rotate': {'values': (-45, 45), 'prob': 0.4},\n 'translate': {'values': ('RANDOM', -0.2, 0.2), 'prob': 0.2, 'use_replication': True},\n 'zoom': {'values': (0.5, 1.5), 'prob': 0.9, 'use_replication': True}}\n\n augmentor = Augmentor(config, no_repetition=True)\n\n return augmentor", "def get_validation_augmentation(x: int = 320, y: int = 640):\n test_transform = [\n albu.Resize(x, y)\n ]\n return albu.Compose(test_transform, additional_targets={\"image2\": \"image\", \"image3\": \"image\", \"image4\": \"image\"})", "def image_augmentation(dataset_dict):\n dataset_dict = copy.deepcopy(dataset_dict)\n image = utils.read_image(dataset_dict[\"file_name\"], format=\"BGR\")\n\n transform_list = [\n T.RandomCrop(crop_type=\"relative_range\", crop_size=[0.95, 0.87]),\n T.RandomBrightness(0.9, 1.5),\n T.RandomContrast(0.8, 1.6),\n T.RandomSaturation(1.0, 1.6),\n T.RandomRotation(angle=[15, 0, 5, 6, 15], expand=False),\n T.RandomFlip(prob=0.5, horizontal=True, vertical=False),\n T.ResizeScale(1.0, 2.0, target_height=900, target_width=700)\n ]\n\n image, transforms = T.apply_transform_gens(transform_list, image)\n dataset_dict[\"image\"] = torch.as_tensor(image.transpose(2, 0, 1).astype(\"float32\"))\n\n annotations = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n instances = utils.annotations_to_instances(annotations, image.shape[:2])\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n return dataset_dict", "def pre_processing_function(label, filename: str, augmentor: Augmentor = None):\n image = imread(filename)\n if augmentor is not None:\n image = np.round(augmentor.run(image)).astype(np.uint8)\n\n return image, label", "def zoom_augmentation():\n # Get the width and the height of the zoomed version\n x_len, y_len = np.random.randint(250, 350, size=2)\n # Get left upper ,right and lower bound of the pixels in the original image\n left = 
np.random.randint(x_size-x_len)\n upper = np.random.randint(y_size-y_len)\n right, lower = left + x_len, upper+y_len\n # Crops the box and resizes it to the original image size\n box = (left, upper, right, lower)\n return lambda image: image.transform(image.size, Image.EXTENT, box)", "def get_augmented_image(image):\n\n # Rotate randomly about origin\n augmented_image = scipy.ndimage.rotate(image, angle=random.randint(-5, 5), reshape=False, mode='nearest')\n\n # Shift by a random amount\n augmented_image = get_shifted_image(augmented_image, max_shift=5)\n\n # Change brightness by a random amount\n augmented_image = np.clip(augmented_image.astype(np.float32) * random.uniform(0.7, 1.3), 0, 255)\n\n return augmented_image.astype(np.uint8)", "def image_augmentations(\n image,\n data_augmentations,\n model_input_image_size,\n label=None):\n if image.get_shape() == None:\n im_size = model_input_image_size\n else:\n im_size = image.get_shape().as_list()\n im_size_check = True # np.any(\n # np.less_equal(\n # model_input_image_size[:2],\n # im_size[:2]))\n if data_augmentations is not None:\n for aug in data_augmentations:\n # Pixel/image-level augmentations\n if aug == 'image_float32':\n image = tf.cast(image, tf.float32)\n if aug == 'label_float32':\n label = tf.cast(label, tf.float32)\n if aug == 'bfloat16':\n image = tf.cast(image, tf.bfloat16)\n if aug == 'singleton':\n image = tf.expand_dims(image, axis=-1)\n print 'Adding singleton dimension to image.'\n if aug == 'sgl_label' or aug == 'singleton_label':\n label = tf.expand_dims(label, axis=-1)\n print 'Adding singleton dimension to label.'\n if aug == 'coco_labels':\n label = tf.nn.relu(label - 91)\n if aug == 'contrastive_loss':\n label = tf.stack(\n [tf.ones_like(label), tf.zeros_like(label)], -1)\n if aug == 'bsds_normalize':\n data = np.load(\n '/media/data_cifs/image_datasets/BSDS500/images/train/file_paths.npz')\n mean = data['mean'].squeeze(0)\n stds = data['stds'].squeeze(0)\n image = (image - mean) / stds\n if aug == 'bsds_crop' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n scale_choices = tf.convert_to_tensor(\n # [1. / 2., 1.1 / 2., 1.2 / 2.])\n [1., 1, 1.1, 1.2])\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n image_shape[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_nearest_neighbor(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n combined_crop = tf.random_crop(\n combined_resize,\n tf.concat(\n [model_input_image_size[:2], [combo_shape[-1]]], 0))\n image = combined_crop[:, :, :image_shape[-1]]\n label = combined_crop[:, :, image_shape[-1]:]\n image.set_shape(model_input_image_size)\n label.set_shape(\n model_input_image_size[:2] + [\n combo_shape[-1] - model_input_image_size[-1]])\n print 'Applying BSDS crop.'\n if aug == 'hed_resize' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n scale_choices = tf.convert_to_tensor(\n # [1. 
/ 2., 1.1 / 2., 1.2 / 2.])\n np.arange(1, 1.51, 0.1)) # 0.7, 1.5\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n image_shape[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_bilinear(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n print 'Applying HED resize.'\n if aug == 'uint8_rescale':\n image = tf.cast(image, tf.float32) / 255.\n print 'Applying uint8 rescale to the image.'\n if aug == 'cube_plus_rescale':\n image = tf.cast(image, tf.float32) / 13273.\n print 'Applying uint8 rescale to the image.'\n if aug == 'uint8_rescale_label':\n label = tf.cast(label, tf.float32) / 255.\n print 'Applying uint8 rescale to the label.'\n if aug == 'uint8_rescale_-1_1':\n image = 2 * (tf.cast(image, tf.float32) / 255.) - 1\n print 'Applying uint8 rescale.'\n if aug == 'image_to_bgr':\n image = tf.stack(\n [image[..., 2], image[..., 1], image[..., 0]], axis=-1)\n if aug == 'pascal_normalize':\n image = image - [123.68, 116.78, 103.94]\n if aug == 'ilsvrc12_normalize':\n MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]\n STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]\n image = (image - MEAN_RGB) / STDDEV_RGB\n if aug == 'random_contrast':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n print 'Applying random contrast.'\n if aug == 'random_brightness':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image = tf.image.random_brightness(image, max_delta=63.)\n print 'Applying random brightness.'\n if aug == 'grayscale' and im_size_check:\n # image = tf.image.rgb_to_grayscale(image)\n if len(image.get_shape().as_list()) == 2:\n image = tf.expand_dims(image, axis=-1)\n else:\n image = tf.expand_dims(image[..., 0], axis=-1)\n print 'Converting to grayscale.'\n if aug == 'rgb2gray' and im_size_check:\n image = tf.image.rgb_to_grayscale(image)\n print 'Converting rgb2gray.'\n if aug == 'clip_uint8' and im_size_check:\n image = tf.minimum(image, 255.)\n image = tf.maximum(image, 0.)\n if aug == 'cube_plus_crop':\n image = cube_plus_crop(image, model_input_image_size)\n # Affine augmentations\n if aug == 'rotate' and im_size_check:\n max_theta = 22.\n angle_rad = (max_theta / 180.) 
* math.pi\n angles = tf.random_uniform([], -angle_rad, angle_rad)\n transform = tf.contrib.image.angles_to_projective_transforms(\n angles,\n im_size[0],\n im_size[1])\n image = tf.contrib.image.transform(\n image,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n print 'Applying random rotate.'\n if aug == 'rotate90' and im_size_check:\n image = tf.image.rot90(\n image,\n tf.random_uniform(\n shape=[],\n minval=0,\n maxval=4,\n dtype=tf.int32))\n print 'Applying random 90 degree rotate.'\n if aug == 'rotate90_image_label' and im_size_check:\n concat = tf.image.rot90(\n tf.concat([image, label], -1),\n tf.random_uniform(\n shape=[],\n minval=0,\n maxval=4,\n dtype=tf.int32))\n image = concat[..., :im_size[-1]]\n label = concat[..., im_size[-1]:]\n print 'Applying random 90 degree rotate to images and labels.'\n if aug == 'stack3d':\n image = tf.concat([image, image, image], axis=-1)\n if aug == 'rot_image_label' and im_size_check:\n max_theta = 30.\n angle_rad = (max_theta / 180.) * math.pi\n angles = tf.random_uniform([], -angle_rad, angle_rad)\n transform = tf.contrib.image.angles_to_projective_transforms(\n angles,\n im_size[0],\n im_size[1])\n image = tf.contrib.image.transform(\n image,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n label = tf.contrib.image.transform(\n label,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n print 'Applying random rotate.'\n if aug == 'random_scale_crop_image_label'\\\n and im_size_check:\n scale_choices = tf.convert_to_tensor(\n [1., 1.04, 1.08, 1.12, 1.16])\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n model_input_image_size[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_bicubic(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n combined_crop = tf.random_crop(\n combined_resize, tf.concat(\n [model_input_image_size[:2], [combo_shape[-1]]], 0))\n image = combined_crop[:, :, :image_shape[-1]]\n label = combined_crop[:, :, image_shape[-1]:]\n image.set_shape(model_input_image_size)\n label.set_shape(\n model_input_image_size[:2] + [\n combo_shape[-1] - model_input_image_size[-1]])\n if aug == 'rc_res' and im_size_check:\n image = random_crop(image, model_input_image_size)\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n ms = [x // 2 for x in model_input_image_size]\n image = resize_image_label(\n im=image,\n model_input_image_size=ms,\n f='bicubic')\n print 'Applying random crop and resize.'\n if aug == 'cc_res' and im_size_check:\n image = center_crop(image, model_input_image_size)\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n ms = [x // 2 for x in model_input_image_size]\n image = resize_image_label(\n im=image,\n model_input_image_size=ms,\n f='bicubic')\n print 'Applying center crop and resize.'\n if aug == 'random_crop' and im_size_check:\n image = random_crop(image, model_input_image_size)\n print 'Applying random crop.'\n if aug == 'center_crop' and im_size_check:\n image = center_crop(image, model_input_image_size)\n print 'Applying center crop.'\n if aug == 
'rc_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = crop_image_label(\n image=image,\n label=label,\n size=model_input_image_size,\n crop='random')\n if aug == 'cc_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = crop_image_label(\n image=image,\n label=label,\n size=model_input_image_size,\n crop='center')\n if aug == 'resize' and im_size_check:\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n print 'Applying area resize.'\n if aug == 'jk_resize' and im_size_check:\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = tf.image.resize_image_with_crop_or_pad(\n image,\n model_input_image_size[0],\n model_input_image_size[1])\n print 'Applying area resize.'\n if aug == 'random_crop_and_res_cube_plus' and im_size_check:\n im_shape = image.get_shape().as_list()\n im_shape[0] /= 4\n im_shape[1] /= 4\n image = resize_image_label(\n im=image,\n model_input_image_size=im_shape[:2],\n f='bicubic')\n image = random_crop(image, model_input_image_size)\n if aug == 'center_crop_and_res_cube_plus' and im_size_check:\n im_shape = image.get_shape().as_list()\n im_shape[0] /= 4\n im_shape[1] /= 4\n image = resize_image_label(\n im=image,\n model_input_image_size=im_shape[:2],\n f='bicubic')\n image = center_crop(image, model_input_image_size)\n if aug == 'res_and_crop' and im_size_check:\n model_input_image_size_1 = np.asarray(\n model_input_image_size[:2]) + 28\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size_1,\n f='area')\n image = center_crop(image, model_input_image_size)\n print 'Applying area resize.'\n if aug == 'res_nn' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='nearest')\n print 'Applying nearest resize.'\n if aug == 'res_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n label = resize_image_label(\n im=label,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n print 'Applying bilinear resize.'\n if aug == 'res_nn_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='nearest')\n label = resize_image_label(\n im=label,\n model_input_image_size=model_input_image_size,\n f='nearest')\n print 'Applying nearest resize.'\n if aug == 'left_right':\n image = image_flip(image, direction='left_right')\n print 'Applying random flip left-right.'\n if aug == 'up_down':\n image = image_flip(image, direction='up_down')\n print 'Applying random flip up-down.'\n if aug == 'lr_viz_flip':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = lr_viz_flip(image, label)\n image, label = ud_viz_flip(image, label)\n if aug == 
'lr_flip_image_label':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = lr_flip_image_label(image, label)\n if aug == 'ud_flip_image_label':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = ud_flip_image_label(image, label)\n if aug == 'gratings_modulate':\n modulate = 10\n image //= modulate\n offset = (255 / 2) - ((255 / modulate) / 2)\n image += offset\n if aug == 'gaussian_noise':\n im_shape = image.get_shape().as_list()\n assert len(im_shape) == 3, '4D not implemented yet.'\n sigma = 1. / 10.\n mu = 0.\n image = image + tf.random_normal(\n im_shape,\n mean=mu,\n stddev=sigma)\n print 'Applying gaussian noise.'\n if aug == 'gaussian_noise_small':\n im_shape = image.get_shape().as_list()\n assert len(im_shape) == 3, '4D not implemented yet.'\n sigma = 1. / 20.\n mu = 0.\n image = image + tf.random_normal(\n im_shape,\n mean=mu,\n stddev=sigma)\n print 'Applying gaussian noise.'\n if aug == 'mixup':\n raise RuntimeError('Mixup not properly implemented yet.')\n alpha = 0.4\n dist = tf.distributions.Beta(alpha, alpha)\n image = image * dist + (1 - dist) * tf.roll(image, 0, 1)\n label = label * dist + (1 - dist) * tf.roll(label, 0, 1)\n if aug == 'hed_brightness':\n image = tf.image.random_brightness(image, 63)\n if aug == 'hed_contrast':\n image = tf.image.random_contrast(image, lower=0.4, upper=1.5)\n if aug == 'blur_labels':\n label = tf_blur(\n image=label,\n kernel_size=3, # extent\n name='label_blur',\n normalize=True,\n sigma=1.)\n if aug == 'calculate_rate_time_crop':\n im_shape = image.get_shape().as_list()\n minval = im_shape[0] // 3\n time_crop = tf.random_uniform(\n [],\n minval=minval,\n maxval=im_shape[0],\n dtype=tf.int32)\n\n # For now always pull from the beginning\n indices = tf.range(0, time_crop, dtype=tf.int32)\n selected_image = tf.gather(image, indices)\n padded_image = tf.zeros(\n [im_shape[0] - time_crop] + im_shape[1:],\n dtype=selected_image.dtype)\n\n # Randomly concatenate pad to front or back\n image = tf.cond(\n pred=tf.greater(\n tf.random_uniform(\n [],\n minval=0,\n maxval=1,\n dtype=tf.float32),\n 0.5),\n true_fn=lambda: tf.concat(\n [selected_image, padded_image], axis=0),\n false_fn=lambda: tf.concat(\n [padded_image, selected_image], axis=0)\n )\n image.set_shape(im_shape)\n\n # Convert label to rate\n label = label / im_shape[0]\n if aug == 'calculate_rate':\n label = label / image.get_shape().as_list()[0]\n print 'Applying rate transformation.'\n if aug == 'threshold':\n image = tf.cast(tf.greater(image, 0.1), tf.float32)\n print 'Applying threshold.'\n if aug == 'nonzero_label':\n label = tf.cast(tf.greater(label, 0.2), tf.float32)\n print 'Applying threshold.'\n if aug == 'zero_one':\n image = tf.minimum(tf.maximum(image, 0.), 1.)\n print 'Applying threshold.'\n if aug == 'timestep_duplication':\n image = tf.stack([image for iid in range(7)])\n print 'Applying timestep duplication.'\n if aug == 'per_image_standardization':\n image = tf.image.per_image_standardization(image)\n print 'Applying per-image zscore.'\n if aug == 'flip_image_polarity':\n image = tf.abs(image - 1.)\n if aug == 'flip_label_polarity':\n label = tf.abs(label - 1.)\n if aug == 'NCHW':\n image = tf.transpose(image, (2, 0, 1))\n if aug == 'bfloat16_image':\n image = tf.cast(image, tf.bfloat16)\n if aug == 'bfloat16_label':\n label = tf.cast(label, tf.bfloat16)\n if aug == 'hfloat16_image':\n image = tf.cast(image, tf.float16)\n if aug == 'hfloat16_label':\n label = tf.cast(label, tf.float16)\n if aug == 
'threshold_label':\n label = tf.cast(tf.greater(label, 0.999), tf.float32)\n print 'Applying threshold of 0.999 to the label.'\n if aug == 'threshold_label_255':\n # cABC label = tf.cast(tf.greater(label, 200), tf.float32)\n label = tf.cast(tf.greater(label, 10), tf.float32)\n print 'Applying threshold of 127.5 to the label.'\n if aug == 'normalize_label':\n label = tf.cast(label, tf.float32)\n label = label / tf.reduce_max(label) # tf.cast(tf.greater(label, 25), tf.float32)\n print 'Normalizing label to [0, 1].'\n if aug == 'scale_to_255':\n image = image * 255.\n if aug == 'clip_255':\n image = tf.maximum(tf.minimum(255., image), 0.)\n # else:\n # assert len(image.get_shape()) == 3, '4D not implemented yet.'\n # image = tf.image.resize_image_with_crop_or_pad(\n # image, model_input_image_size[0], model_input_image_size[1])\n return image, label", "def augment_images(folder, augmenter, images, size = (224, 224), start_index=0, iterations=1):\n # Get the total number of images\n n = len(images)\n \n # Main iteration that applies random transformations to the images\n for i in range(iterations):\n # Apply transformations to the images\n images_augmented = augmenter(images=images)\n \n # Save the augmented images on the disk\n save_images_in_folder(folder=folder, images=images_augmented, size=size, start_index=i*n)", "def mirror_augmentation():\n return lambda image: ImageOps.mirror(image)", "def _data_augmentation(feature_dict):\n image_features = feature_dict[_transformed_name(constants.IMAGE_KEY)]\n image_features = _image_augmentation(image_features)\n feature_dict[_transformed_name(constants.IMAGE_KEY)] = image_features\n return feature_dict", "def aortic_data_augmenter(image, label, shift, rotate, scale, intensity, flip):\n image2 = np.zeros(image.shape, dtype=np.float32)\n label2 = np.zeros(label.shape, dtype=np.int32)\n\n # For N image. 
which come come from the same subject in the LSTM model,\n # generate the same random affine transformation parameters.\n shift_val = [np.clip(np.random.normal(), -3, 3) * shift,\n np.clip(np.random.normal(), -3, 3) * shift]\n rotate_val = np.clip(np.random.normal(), -3, 3) * rotate\n scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale\n intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity\n\n # The affine transformation (rotation + scale + shift)\n row, col = image.shape[1:3]\n M = cv2.getRotationMatrix2D(\n (row / 2, col / 2), rotate_val, 1.0 / scale_val)\n M[:, 2] += shift_val\n\n # Apply the transformation to the image\n for i in range(image.shape[0]):\n for c in range(image.shape[3]):\n image2[i, :, :, c] = ndimage.interpolation.affine_transform(\n image[i, :, :, c], M[:, :2], M[:, 2], order=1)\n\n label2[i, :, :] = ndimage.interpolation.affine_transform(\n label[i, :, :], M[:, :2], M[:, 2], order=0)\n\n # Apply intensity variation\n image2[i] *= intensity_val\n\n # Apply random horizontal or vertical flipping\n if flip:\n if np.random.uniform() >= 0.5:\n image2[i] = image2[i, ::-1, :, :]\n label2[i] = label2[i, ::-1, :]\n else:\n image2[i] = image2[i, :, ::-1, :]\n label2[i] = label2[i, :, ::-1]\n return image2, label2", "def xray_augmentationFactory(augmentation, height, width):\n downsample = (260,260)\n\n if augmentation == 'autoaugment':\n transform = [\n transforms.RandomCrop((height, width)),\n transforms.RandomHorizontalFlip(),\n AutoAugment(),\n Cutout()\n ]\n elif augmentation == 'original-cifar':\n transform = [\n transforms.Resize(downsample),\n transforms.RandomCrop(size=(height, width)),\n transforms.RandomHorizontalFlip(),\n ]\n elif augmentation == 'noaugment':\n transform = [\n transforms.Resize(downsample),\n transforms.CenterCrop((height, width)),\n ]\n\n elif augmentation == 'glico':\n NotImplemented(f\"augment parameter {augmentation} not implemented\")\n else: \n NotImplemented(f\"augment parameter {augmentation} not implemented\")\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n #normalize = transforms.Normalize(mean=[0.5888, 0.5888, 0.5889],\n #std=[0.1882, 0.1882, 0.1882])\n\n return transforms.Compose(transform + [transforms.ToTensor(), normalize])", "def get_test_augmentation(x: int = 320, y: int = 640):\n test_transform = [\n albu.Resize(x, y),\n albu.HueSaturationValue(0, (30, 30), (0, 0), always_apply=True),\n ]\n return albu.Compose(test_transform, additional_targets={\"image2\": \"image\", \"image3\": \"image\", \"image4\": \"image\"})", "def data_augmenter(image, label, shift, rotate, scale, intensity, flip):\n image2 = np.zeros(image.shape, dtype=np.float32)\n label2 = np.zeros(label.shape, dtype=np.int32)\n for i in range(image.shape[0]):\n # For each image slice, generate random affine transformation parameters\n # using the Gaussian distribution\n shift_val = [np.clip(np.random.normal(), -3, 3) * shift,\n np.clip(np.random.normal(), -3, 3) * shift]\n rotate_val = np.clip(np.random.normal(), -3, 3) * rotate\n scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale\n intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity\n\n # Apply the affine transformation (rotation + scale + shift) to the image\n row, col = image.shape[1:3]\n M = cv2.getRotationMatrix2D((row / 2, col / 2), rotate_val, 1.0 / scale_val)\n M[:, 2] += shift_val\n for c in range(image.shape[3]):\n image2[i, :, :, c] = ndimage.interpolation.affine_transform(image[i, :, :, c],\n M[:, :2], M[:, 2], 
order=1)\n\n # Apply the affine transformation (rotation + scale + shift) to the label map\n label2[i, :, :] = ndimage.interpolation.affine_transform(label[i, :, :],\n M[:, :2], M[:, 2], order=0)\n\n # Apply intensity variation\n image2[i] *= intensity_val\n\n # Apply random horizontal or vertical flipping\n if flip:\n if np.random.uniform() >= 0.5:\n image2[i] = image2[i, ::-1, :, :]\n label2[i] = label2[i, ::-1, :]\n else:\n image2[i] = image2[i, :, ::-1, :]\n label2[i] = label2[i, :, ::-1]\n return image2, label2", "def visualize_augmentation(X, y):\n n_samples = 10\n n_augmentations = 9\n Xbatches = DataFlow(X, y, 64)\n indices = Xbatches.indices[:n_samples]\n X_samples = X[indices]\n y_samples = y[indices]\n X_augmented = X_samples / 127.5 - 1.0\n for i in range(n_augmentations):\n x, _ = Xbatches.process_batch(X_samples, y_samples)\n X_augmented = np.concatenate([X_augmented, x])\n X_tiled = tile(X_augmented, n_augmentations + 1, n_samples)\n X_tiled = (X_tiled + 1.0) / 2.0\n\n fig = plt.figure(figsize = (10,10))\n ax = fig.add_subplot(111)\n ax.imshow(X_tiled)\n ax.set_axis_off()\n fig.savefig(os.path.join(img_dir, \"data_augmentation.png\"))", "def augment_image(image):\n with tf.variable_scope('AugmentImage'):\n height = image.get_shape().dims[0].value\n width = image.get_shape().dims[1].value\n\n # Random crop cut from the street sign image, resized to the same size.\n # Assures that the crop is covers at least 0.8 area of the input image.\n bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes = tf.zeros([0, 0, 4]),\n min_object_covered = 0.8,\n aspect_ratio_range = [0.8, 1.2],\n area_range = [0.8, 1.0],\n use_image_if_no_bounding_boxes = True)\n distorted_image = tf.slice(image, bbox_begin, bbox_size)\n\n # Randomly chooses one of the 4 interpolation methods\n distorted_image = inception_preprocessing.apply_with_random_selector(\n distorted_image,\n lambda x, method: tf.image.resize_images(x, [height, width], method),\n num_cases = 4)\n distorted_image.set_shape([height, width, 3])\n\n # Color distortion\n distorted_image = inception_preprocessing.apply_with_random_selector(\n distorted_image,\n functools.partial(\n inception_preprocessing.distort_color, fast_mode = False),\n num_cases = 4)\n distorted_image = tf.clip_by_value(distorted_image, -1.5, 1.5)\n\n return distorted_image", "def createAugmentor(self):\n rotation_range = [-15, 15]\n shear_range = [-0.3 * 180 / math.pi, 0.3 * 180 / math.pi]\n zoom_range = [0.8, 2]\n shift_range = [5, 5]\n\n return ImageAugmentor(0.5, shear_range, rotation_range, shift_range, zoom_range)", "def data_augmentation(image_data, mask_data, rotate=False, vertical_flip=False, horizontal_flip=False):\n aug_images = []\n aug_masks = []\n\n for _ in range(len(image_data)):\n if rotate:\n rotation = A.RandomRotate90(p=1)\n rotated_data = rotation(image=image_data[_], mask=mask_data[_])\n rotated_image = rotated_data['image']\n rotated_mask = rotated_data['mask']\n aug_images.append(rotated_image)\n aug_masks.append(rotated_mask)\n\n if vertical_flip:\n flip_v = A.VerticalFlip(p=1)\n vertical_data = flip_v(image=image_data[_], mask=mask_data[_])\n vertical_image = vertical_data['image']\n vertical_mask = vertical_data['mask']\n aug_images.append(vertical_image)\n aug_masks.append(vertical_mask)\n\n if horizontal_flip:\n flip_h = A.HorizontalFlip(p=1)\n horizontal_data = flip_h(image=image_data[_], mask=mask_data[_])\n horizontal_image = horizontal_data['image']\n horizontal_mask = 
horizontal_data['mask']\n aug_images.append(horizontal_image)\n aug_masks.append(horizontal_mask)\n\n nd_images = make_ndarray(aug_images)\n nd_masks = make_ndarray(aug_masks)\n #nd_images = np.zeros((len(aug_images), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)\n #nd_masks = np.zeros((len(aug_masks), IMG_HEIGHT, IMG_WIDTH), dtype=np.float32)\n\n #for _ in range(len(aug_images)): # Load into ndarray\n # nd_images[_] = aug_images[_]\n # nd_masks[_] = aug_masks[_] # load mask without channel variable\n\n return nd_images, nd_masks", "def augment_image(image,max_angle):\n angles = [-max_angle,max_angle]\n axes = [(0,1),(0,2),(1,2)]\n images_aug = [image,image[::-1]]\n for angle in angles:\n for axis in axes:\n images_aug.append(aug.rotate(image, angle, axes=axis, reshape=False, order=0))\n images_aug.append(aug.rotate(image[::-1], angle, axes=axis, reshape=False, order=0))\n return images_aug", "def augment_image(images_folder_path, file_extension, rotation=True, flipping=True):\n\n def save_image(img_name, img):\n # this function save image into target folder\n cv2.imwrite(images_folder_path + img_name, img)\n\n def rotate(image_name, angle=90):\n \"\"\"\n Rotate the image\n :param image_name:\n :param angle: Rotation angle in degrees. Positive values mean\n counter-clockwise rotation (the coordinate origin is assumed to be the top-left corner).\n \"\"\"\n img = cv2.imread(images_folder_path + image_name)\n rotated = imutils.rotate_bound(img, angle)\n rotated_image_name = str(angle) + \"_\" + image_name\n return rotated_image_name, rotated\n\n def flip(image_name, vflip=False, hflip=False):\n \"\"\"\n Flip the image\n :param image_name:\n :param vflip: whether to flip the image vertically\n :param hflip: whether to flip the image horizontally\n \"\"\"\n save_name = \"\"\n img = cv2.imread(images_folder_path + image_name)\n if vflip:\n c = 1\n save_name = \"flip_v\"\n if hflip:\n c = 0\n save_name = \"flip_h\"\n if hflip and vflip:\n c = -1\n save_name = \"flip_hv\"\n\n flip_image = cv2.flip(img, flipCode=c)\n flip_image_name = save_name + \"_\" + image_name\n\n return flip_image_name, flip_image\n\n all_images_name = path.read_all_files_name_from(folder_path=images_folder_path,\n file_extension=file_extension)\n counter = 0\n\n # adding random noise to image.\n # img_noise = random_noise(img, mode= 's&p', clip=True)\n\n for image_name in all_images_name:\n # Perform the counter clockwise rotation holding at the center\n # 90 degrees\n if rotation:\n rotated_img_name, rotated_img = rotate(image_name, angle=90)\n save_image(rotated_img_name, rotated_img)\n rotated_img_name, rotated_img = rotate(image_name, angle=180)\n save_image(rotated_img_name, rotated_img)\n rotated_img_name, rotated_img = rotate(image_name, angle=270)\n save_image(rotated_img_name, rotated_img)\n\n if flipping:\n # is same as 180 rotation\n # flip_image_name, flip_image = flip(image_name, vflip=True, hflip=True)\n # save_image(flip_image_name, flip_image)\n flip_image_name, flip_image = flip(image_name, vflip=True, hflip=False)\n save_image(flip_image_name, flip_image)\n flip_image_name, flip_image = flip(image_name, vflip=False, hflip=True)\n save_image(flip_image_name, flip_image)\n\n if counter % 50 == 0:\n print(counter)\n counter = counter + 1", "def augment(image, n,\n hflip=False, vflip=False, scale_to_percent=1.0, scale_axis_equally=True,\n rotation_deg=0, shear_deg=0, translation_x_px=0, translation_y_px=0,\n brightness_change=0.0, noise_mean=0.0, noise_std=0.0):\n assert n >= 0\n result = []\n if n == 
0:\n return result\n\n width = image.shape[0]\n height = image.shape[1]\n matrices = create_aug_matrices(n, img_width_px=width, img_height_px=height,\n scale_to_percent=scale_to_percent,\n scale_axis_equally=scale_axis_equally,\n rotation_deg=rotation_deg,\n shear_deg=shear_deg,\n translation_x_px=translation_x_px,\n translation_y_px=translation_y_px)\n for i in range(n):\n img = np.copy(image)\n matrix = matrices[i]\n\n # random horizontal / vertical flip\n if hflip and i % 2 == 0:\n img = np.fliplr(img)\n if vflip and random.random() > 0.5:\n img = np.flipud(img)\n\n # random brightness adjustment\n by_percent = random.uniform(1.0 - brightness_change, 1.0 + brightness_change)\n img = img * by_percent\n\n # gaussian noise\n # numpy requires a std above 0\n if noise_std > 0:\n img = img + (255 * np.random.normal(noise_mean, noise_std, (img.shape)))\n\n # clip to 0-255\n img = np.clip(img, 0, 255).astype(np.uint8)\n\n arr = tf.warp(img, matrix, mode=\"nearest\") # projects to float 0-1\n img = np.array(arr * 255, dtype=np.uint8)\n result.append(img)\n\n return result", "def visualize_augmentation(image, angle):\n\n # Create a copy of the image to prevent changing the original\n img = np.copy(image)\n\n cols = 2\n rows = 6\n fig_size = (7 * cols, 4 * rows) # Figure width and height, in inches\n\n fig, ax = plt.subplots(rows, cols, figsize=fig_size)\n # Plot original images in the left column\n for idx in range(rows):\n ax[idx, 0].imshow(img)\n ax[idx, 0].set_title(\"Original, Angle = \" + str(round(angle, 3)))\n # Horizontal Flip\n tmp_img, tmp_angle = random_horizontal_flip(img, angle, 1.0)\n ax[0, 1].imshow(tmp_img)\n ax[0, 1].set_title(\"Horizontal Flip, Angle = \" + str(round(tmp_angle, 3)))\n # Translation\n tmp_img, tmp_angle = random_translation(img, angle)\n ax[1, 1].imshow(tmp_img)\n ax[1, 1].set_title(\"Translation, Angle = \" + str(round(tmp_angle, 3)))\n # Gaussian Noise\n tmp_img = random_gaussian(img)\n ax[2, 1].imshow(tmp_img)\n ax[2, 1].set_title(\"Gaussian Noise, Angle = \" + str(round(angle, 3)))\n # Shadows\n tmp_img = random_shadows(img, 1.0, 0.9)\n ax[3, 1].imshow(tmp_img)\n ax[3, 1].set_title(\"Shadows, Angle = \" + str(round(angle, 3)))\n # Brightness\n tmp_img = random_brightness(img)\n ax[4, 1].imshow(tmp_img)\n ax[4, 1].set_title(\"Brightness, Angle = \" + str(round(angle, 3)))\n # All Augmentation\n tmp_img, tmp_angle = random_all(img, angle)\n ax[5, 1].imshow(tmp_img)\n ax[5, 1].set_title(\"All Randomization, Angle = \" +\n str(round(tmp_angle, 3)))\n\n return fig", "def data_augmentation(self, img):\n new_img = img.astype(float)\n # random brightness - the mask bit keeps values from going beyond (0,255)\n value = np.random.randint(-28, 28)\n if value > 0:\n mask = (new_img[:, :, 0] + value) > 255\n if value <= 0:\n mask = (new_img[:, :, 0] + value) < 0\n new_img[:, :, 0] += np.where(mask, 0, value)\n # random shadow - full height, random left/right side, random darkening\n h, w = new_img.shape[0:2]\n mid = np.random.randint(0, w)\n factor = np.random.uniform(0.6, 0.8)\n if np.random.rand() > .5:\n new_img[:, 0:mid, 0] *= factor\n else:\n new_img[:, mid:w, 0] *= factor\n return (new_img.astype(np.uint8))", "def data_augmentation(image, mode):\n if mode == 0:\n # original\n out = image\n elif mode == 1:\n # flip up and down\n out = np.flipud(image)\n elif mode == 2:\n # rotate counterwise 90 degree\n out = np.rot90(image)\n elif mode == 3:\n # rotate 90 degree and flip up and down\n out = np.rot90(image)\n out = np.flipud(out)\n elif mode == 4:\n # rotate 180 
degree\n out = np.rot90(image, k=2)\n elif mode == 5:\n # rotate 180 degree and flip\n out = np.rot90(image, k=2)\n out = np.flipud(out)\n elif mode == 6:\n # rotate 270 degree\n out = np.rot90(image, k=3)\n elif mode == 7:\n # rotate 270 degree and flip\n out = np.rot90(image, k=3)\n out = np.flipud(out)\n else:\n raise Exception('Invalid choice of image transformation')\n\n return out", "def augment(x: np.ndarray, y: np.ndarray):\n scipy.random.seed()\n\n # scale = np.random.normal(1, 0.1, size=3)\n alpha, theta = np.random.normal(0, 9, size=2)\n alpha = 0\n\n for i in range(1, len(x.shape) - 1):\n if np.random.binomial(1, .5):\n x = np.flip(x, -i)\n y = np.flip(y, -i)\n\n # mscan = np.array([_scale_crop(i) for i in mscan])\n # segm = _scale_crop(segm[0])[np.newaxis]\n\n x = _rotate(x, 3, theta, alpha)\n y = _rotate(y, 0, theta, alpha)\n\n # if np.random.binomial(1, .5):\n # t = np.random.choice([-90, 0, 90])\n # a = np.random.choice([-90, 0, 90])\n # mscan = _rotate(mscan, 3, t, a)\n # segm = _rotate(segm, 3, t, a)\n\n x = np.array([i * np.random.normal(1, 0.35) for i in x])\n return x, y", "def augment(im_path):\n # change directory to toplevel of repo (parent of augmentation)\n os.chdir(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])\n\n im_name, im_ext = os.path.splitext(im_path)\n if im_path not in os.listdir(\"data/raw\"):\n raise FileNotFoundError(f\"{im_path} could not be found in the list of raw images\")\n\n if im_name + \".json\" not in os.listdir(\"data/corrected\"):\n raise FileNotFoundError(f\"{im_name} has not been labelled yet! (no file '{im_name}.json' in corrected)\")\n\n with open(f\"data/corrected/{im_name}.json\") as read_file:\n im_label = json.loads(read_file.read(-1))\n persp = np.float32(im_label[\"perspective\"])\n\n im: Image.Image = Image.open(f\"data/raw/{im_path}\")\n # downscale image to reasonable height\n scale_factor = 500 / im.height\n persp = persp * scale_factor\n im.thumbnail([1000000, 500])\n im_cv = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)\n\n # determine crop box\n crop_amount = (im.width - 500)\n left_crop = random.randint(crop_amount//4, 3 * crop_amount // 4)\n # left_crop = crop_amount//2\n right_crop = crop_amount - left_crop\n box = [\n left_crop,\n 0,\n im.width - right_crop,\n im.height\n ]\n\n # warp perspective\n # basic way: add gaussian noise to the 4 corner points\n warped_persp = persp.copy()\n for i in range(4):\n for j in range(2):\n v = warped_persp[i][j]\n v += random.gauss(0, 5)\n # ensure none of the perspective points will fall outside the cropped image\n v = max(box[j] + 5, v)\n v = min(box[j+2] - 5, v)\n warped_persp[i][j] = v\n\n matrix = cv2.getPerspectiveTransform(persp, warped_persp)\n warped_im = cv2.warpPerspective(im_cv, matrix, (im.width, im.height))\n warped_im = Image.fromarray(cv2.cvtColor(warped_im, cv2.COLOR_BGR2RGB))\n\n # run crop on warped image\n warped_im = warped_im.crop(box)\n # adjust warped coordinates according to crop\n for i in range(4):\n warped_persp[i][0] -= box[0]\n warped_persp[i][1] -= box[1]\n\n # scale down to final size\n warped_im = warped_im.resize((256, 256))\n for i in range(4):\n warped_persp[i][0] *= 256 / 500\n warped_persp[i][1] *= 256 / 500\n\n # adjust image colour balance, saturation and contrast\n warped_im = ImageEnhance.Color(warped_im).enhance(random.uniform(0.9, 1.2))\n warped_im = ImageEnhance.Contrast(warped_im).enhance(random.uniform(0.8, 1.2))\n warped_im = ImageEnhance.Brightness(warped_im).enhance(random.uniform(0.8, 1.2))\n\n # adjust image 
temperature\n # thanks to Mark Ransom (https://stackoverflow.com/a/11888449)\n temp_r, temp_g, temp_b = random.choice(KELVIN_TABLE)\n convert_matrix = (temp_r / 255.0, 0.0, 0.0, 0.0,\n 0.0, temp_g / 255.0, 0.0, 0.0,\n 0.0, 0.0, temp_b / 255.0, 0.0)\n warped_im = warped_im.convert(\"RGB\", convert_matrix)\n\n # add noise\n noise_strength = random.uniform(5, 10)\n warped_im_arr = np.float64(np.array(warped_im))\n warped_im_arr += np.random.normal(0, noise_strength, warped_im_arr.shape)\n warped_im_arr = np.clip(warped_im_arr, 0, 255)\n warped_im = Image.fromarray(np.uint8(warped_im_arr))\n\n fname = f\"{im_name}-{hex(random.randint(2**20, 2**24))[2:]}\"\n warped_im.save(f\"data/augmented/{fname}{im_ext}\")\n with open(f\"data/augmented/{fname}.json\", \"w\") as write_file:\n data = {\n \"darts\": im_label[\"darts\"],\n \"perspective\": warped_persp.tolist()\n }\n write_file.write(json.dumps(data))\n return warped_im, warped_persp", "def augment(input):\n\treturn np.insert(input, 0, 1, axis = 1)\n\n\n\n\t#np.set_printoptions(suppress=True) ", "def augment(image,masks):\n\n # Random horizontal flipping\n if random.random() > 0.5:\n image = TF.hflip(image)\n masks = TF.hflip(masks)\n\n # Random vertical flipping\n if random.random() > 0.5:\n image = TF.vflip(image)\n masks = TF.vflip(masks)\n return image,masks", "def apply_augmentations(self, x, augmentations):\r\n # print(\"\\n#####################\\nSADataset.apply_augmentations: x shape == \", x.shape)\r\n if augmentations is not None:\r\n # print(\"SADataset.apply_augmentations: augmentations == \", augmentations)\r\n assert len(augmentations) == 11\r\n for j, curr_augmentation_set in enumerate(augmentations):\r\n assert len(list(curr_augmentation_set.keys())) == 2\r\n # print(\"SADataset.apply_augmentations: len(list(curr_augmentation_set.keys())) == 2\")\r\n # print(\"\\tSADataset.apply_augmentations: curr_augmentation_set == \", curr_augmentation_set)\r\n for _, curr_augmentation in enumerate(list(curr_augmentation_set.keys())):\r\n # print(\"\\t\\tSADataset.apply_augmentations: curr_augmentation == \", curr_augmentation)\r\n curr_augmentation_val = curr_augmentation_set[curr_augmentation]\r\n\r\n if curr_augmentation == 'amplitude_scale':\r\n # print(\"\\n\\t\\t\\tSADataset.apply_augmentations: NOW APPLYING amplitude_scale AUGMENTATION\")\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n x[:,j] = curr_augmentation_val * x[:,j]\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n elif curr_augmentation == 'DC_shift':\r\n # print(\"\\n\\t\\t\\tSADataset.apply_augmentations: NOW APPLYING DC_shift AUGMENTATION\")\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n x[:,j] = x[:,j] + curr_augmentation_val\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n elif curr_augmentation == 'additive_Gaussian_noise':\r\n # print(\"\\n\\t\\t\\tSADataset.apply_augmentations: NOW APPLYING additive_Gaussian_noise AUGMENTATION\")\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: np.random.normal(0, curr_augmentation_val, x[:,j].shape) shape == \", np.random.normal(0, curr_augmentation_val, x[:,j].shape).shape)\r\n x[:,j] = x[:,j] + np.random.normal(0, curr_augmentation_val, x[:,j].shape)# see https://stackoverflow.com/questions/14058340/adding-noise-to-a-signal-in-python and 
https://numpy.org/doc/stable/reference/random/generated/numpy.random.normal.html\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n elif curr_augmentation == 'band-stop_filter':\r\n \"\"\"\r\n see:\r\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirnotch.html\r\n https://www.programcreek.com/python/example/115815/scipy.signal.iirnotch\r\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html\r\n \"\"\"\r\n # print(\"\\n\\t\\t\\tSADataset.apply_augmentations: NOW APPLYING band-stop_filter AUGMENTATION\")\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: curr_augmentation_val == \", curr_augmentation_val)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: curr_augmentation_val/self.BW == \", curr_augmentation_val/self.BW)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: self.SFREQ == \", self.SFREQ)\r\n b, a = iirnotch(curr_augmentation_val, curr_augmentation_val/self.BW, self.SFREQ)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: b == \", b)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: a == \", a)\r\n x[:,j] = lfilter(b, a, x[:,j])\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n elif curr_augmentation == 'time_shift':\r\n # print(\"\\n\\t\\t\\tSADataset.apply_augmentations: NOW APPLYING time_shift AUGMENTATION\")\r\n if curr_augmentation_val != 0:\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n new_signal = np.zeros(x[:,j].shape)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: new_signal shape == \", new_signal.shape)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: curr_augmentation_val == \", curr_augmentation_val)\r\n if curr_augmentation_val < 0:\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: new_signal[:curr_augmentation_val] shape == \", new_signal[:curr_augmentation_val].shape)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[np.abs(curr_augmentation_val):,j] shape == \", x[np.abs(curr_augmentation_val):,j].shape)\r\n new_signal[:curr_augmentation_val] = x[np.abs(curr_augmentation_val):,j]\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: new_signal[curr_augmentation_val:] shape == \", new_signal[curr_augmentation_val:].shape)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:np.abs(curr_augmentation_val),j] shape == \", x[:np.abs(curr_augmentation_val),j].shape)\r\n new_signal[curr_augmentation_val:] = x[:np.abs(curr_augmentation_val),j]\r\n else:\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: new_signal[:curr_augmentation_val] shape == \", new_signal[:curr_augmentation_val].shape)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[-curr_augmentation_val:,j] shape == \", x[-curr_augmentation_val:,j].shape)\r\n new_signal[:curr_augmentation_val] = x[-curr_augmentation_val:,j]\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: new_signal[curr_augmentation_val:] shape == \", new_signal[curr_augmentation_val:].shape)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:-curr_augmentation_val,j] shape == \", x[:-curr_augmentation_val,j].shape)\r\n new_signal[curr_augmentation_val:] = x[:-curr_augmentation_val,j]\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: new_signal shape == \", new_signal.shape)\r\n x[:,j] = new_signal\r\n # 
print(\"\\t\\t\\tSADataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n # else:\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: curr_augmentation_val == 0 -> SKIPPING\")\r\n elif curr_augmentation == 'zero-masking': \r\n # print(\"\\n\\t\\t\\tSADataset.apply_augmentations: NOW APPLYING zero-masking AUGMENTATION\")\r\n # print(\"\\t\\t\\tSADataset.apply_augmentations: x[curr_augmentation_val[1]:curr_augmentation_val[1]+curr_augmentation_val[0], j] shape == \", x[curr_augmentation_val[1]:curr_augmentation_val[1]+curr_augmentation_val[0], j].shape)\r\n x[curr_augmentation_val[1]:curr_augmentation_val[1]+curr_augmentation_val[0], j] = 0.\r\n else:\r\n raise NotImplementedError(\"curr_augmentation == \"+str(curr_augmentation)+\" not recognized for application\")\r\n \r\n # print(\"SADataset.apply_augmentations: x shape == \", x.shape)\r\n return x", "def _image_augmentation(image_features):\n batch_size = tf.shape(image_features)[0]\n image_features = tf.image.random_flip_left_right(image_features)\n image_features = tf.image.resize_with_crop_or_pad(image_features, constants.HEIGHT + 30, constants.WIDTH + 30)\n image_features = tf.image.random_crop(image_features, (batch_size, constants.HEIGHT, constants.WIDTH, 3))\n return image_features", "def plot_with_augmentation(image, mask, augment):\n augmented = augment(image=image, mask=mask)\n image_flipped = augmented[\"image\"]\n mask_flipped = augmented[\"mask\"]\n visualize(image_flipped, mask_flipped, original_image=image, original_mask=mask)", "def image_mask_augmentation(x, y, batch_size=4, transformations=None, seed=6):\n # Always perform some basic transformations\n if transformations is None:\n transformations = dict(\n rotation_range=10.0,\n height_shift_range=0.02,\n shear_range=5,\n horizontal_flip=True,\n vertical_flip=False,\n fill_mode=\"constant\"\n )\n\n datagen_x = ImageDataGenerator(**transformations)\n datagen_x.fit(x, augment=True, seed=seed)\n datagen_y = ImageDataGenerator(**transformations)\n datagen_y.fit(y, augment=True, seed=seed)\n\n x_aug = datagen_x.flow(x, batch_size=batch_size, seed=seed)\n y_aug = datagen_y.flow(y, batch_size=batch_size, seed=seed)\n\n generator = zip(x_aug, y_aug)\n\n return generator", "def aug_imgs(in_path, out_path):\n names = os.listdir(in_path)\n aug = iaa.Sequential([iaa.AdditiveGaussianNoise(scale=0.01*255), iaa.Fliplr(p=0.5), iaa.Affine(shear=-10)],\n random_order=True)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n img_new = aug.augment_image(img)\n cv2.imwrite(out_path + \"videoAug_neg_2_\"+str(i)+\".jpg\", img_new)\n\n return", "def data_augmentation(im_list, mode='standard', tag=False, params=None, im_size=224,\n filemode='local', mean_RGB=None):\n if mean_RGB is None:\n mean_RGB = np.array([107.59348955, 112.1047813, 80.9982362])\n else:\n mean_RGB = np.array(mean_RGB)\n rot_ang = [0, 90, 180, 270]\n batch = []\n if tag:\n tag_list = im_list[:, 1]\n im_list = im_list[:, 0]\n if mode == 'minimal':\n params = {'mirror': False, 'rescale': False, 'crop_size': False}\n if mode == 'standard':\n params = {'mirror': True, 'rescale': 0.3, 'zoom': 0.3, 'crop_size': 1.}\n if mode == 'test':\n params = {'mirror': True, 'rescale': 0.1, 'zoom': 0.1, 'crop_size': .9}\n for i, filename in enumerate(im_list):\n if filemode == 'local':\n im = Image.open(filename)\n im = im.convert('RGB')\n elif filemode == 'url':\n filename = BytesIO(requests.get(filename).content)\n im = 
Image.open(filename)\n im = im.convert('RGB')\n if params['mirror'] and np.random.random() > 0.5:\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n if params['mirror'] and tag and tag_list[i] != 'habit':\n if np.random.random() > 0.5:\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n rot = np.random.choice(rot_ang)\n if rot == 90:\n im = im.transpose(Image.ROTATE_90)\n if rot == 180:\n im = im.transpose(Image.ROTATE_180)\n if rot == 270:\n im = im.transpose(Image.ROTATE_270)\n if params['rescale']:\n rescale = params['rescale']\n new_scale = np.random.uniform(low=1.-rescale, high=1.+rescale, size=2)\n im = im.resize((im.size * new_scale).astype(int))\n if params['crop_size']:\n zoom = np.random.rand() * params['zoom']\n crop_size = params['crop_size'] * (1.-zoom)\n ly, lx = im.size\n min_side = min([ly, lx])\n if crop_size == 1:\n crop_size -= 1e-10 # avoid low=high problem of randint generator\n if ly > lx:\n rand_x = np.random.randint(low=0, high=lx*(1.-crop_size))\n rand_y = np.random.randint(low=0, high=ly-lx*crop_size)\n else:\n rand_x = np.random.randint(low=0, high=lx-ly*crop_size)\n rand_y = np.random.randint(low=0, high=ly*(1.-crop_size))\n rand_xy = np.array([rand_y, rand_x])\n im = im.crop(np.concatenate((rand_xy, rand_xy+crop_size*min_side)))\n im = im.resize((im_size, im_size))\n batch.append(np.array(im)) # shape (N, 224, 224, 3)\n\n batch = np.array(batch) - mean_RGB[None, None, None, :] # mean centering\n batch = batch.transpose(0, 3, 1, 2) # shape(N, 3, 224, 224)\n batch = batch[:, ::-1, :, :] # switch from RGB to BGR\n return batch.astype(np.float32)", "def get_validation_augmentation():\n test_transform = [\n A.PadIfNeeded(min_height=512, min_width=512, always_apply=True, border_mode=cv2.BORDER_REPLICATE),\n A.Resize(height = SHAPE, width = SHAPE, interpolation=1, always_apply=True, p=1)\n ]\n return A.Compose(test_transform)", "def data_augmentation(image, mode):\n out = np.transpose(image, (1, 2, 0))\n if mode == 0:\n # original\n out = out\n elif mode == 1:\n # flip up and down\n out = np.flipud(out)\n elif mode == 2:\n # rotate counterwise 90 degree\n out = np.rot90(out)\n elif mode == 3:\n # rotate 90 degree and flip up and down\n out = np.rot90(out)\n out = np.flipud(out)\n elif mode == 4:\n # rotate 180 degree\n out = np.rot90(out, k=2)\n elif mode == 5:\n # rotate 180 degree and flip\n out = np.rot90(out, k=2)\n out = np.flipud(out)\n elif mode == 6:\n # rotate 270 degree\n out = np.rot90(out, k=3)\n elif mode == 7:\n # rotate 270 degree and flip\n out = np.rot90(out, k=3)\n out = np.flipud(out)\n else:\n raise Exception('Invalid choice of image transformation')\n return np.transpose(out, (2, 0, 1))", "def apply_augmentations(self, x, augmentations):\r\n # print(\"\\n#####################\\nSQDataset.apply_augmentations: x shape == \", x.shape)\r\n # print(\"SQDataset.apply_augmentations: augmentations == \", augmentations)\r\n assert len(augmentations) == 11\r\n for j, curr_augmentation_set in enumerate(augmentations):\r\n assert len(list(curr_augmentation_set.keys())) == 2\r\n # print(\"SQDataset.apply_augmentations: len(list(curr_augmentation_set.keys())) == 2\")\r\n # print(\"\\tSQDataset.apply_augmentations: curr_augmentation_set == \", curr_augmentation_set)\r\n for _, curr_augmentation in enumerate(list(curr_augmentation_set.keys())):\r\n # print(\"\\t\\tSQDataset.apply_augmentations: curr_augmentation == \", curr_augmentation)\r\n curr_augmentation_val = curr_augmentation_set[curr_augmentation]\r\n\r\n if curr_augmentation == 'amplitude_scale':\r\n # 
print(\"\\n\\t\\t\\tSQDataset.apply_augmentations: NOW APPLYING amplitude_scale AUGMENTATION\")\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n x[:,j] = curr_augmentation_val * x[:,j]\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n elif curr_augmentation == 'DC_shift':\r\n # print(\"\\n\\t\\t\\tSQDataset.apply_augmentations: NOW APPLYING DC_shift AUGMENTATION\")\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n x[:,j] = x[:,j] + curr_augmentation_val\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n elif curr_augmentation == 'additive_Gaussian_noise':\r\n # print(\"\\n\\t\\t\\tSQDataset.apply_augmentations: NOW APPLYING additive_Gaussian_noise AUGMENTATION\")\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: np.random.normal(0, curr_augmentation_val, x[:,j].shape) shape == \", np.random.normal(0, curr_augmentation_val, x[:,j].shape).shape)\r\n x[:,j] = x[:,j] + np.random.normal(0, curr_augmentation_val, x[:,j].shape)# see https://stackoverflow.com/questions/14058340/adding-noise-to-a-signal-in-python and https://numpy.org/doc/stable/reference/random/generated/numpy.random.normal.html\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n elif curr_augmentation == 'band-stop_filter':\r\n \"\"\"\r\n see:\r\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirnotch.html\r\n https://www.programcreek.com/python/example/115815/scipy.signal.iirnotch\r\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html\r\n \"\"\"\r\n # print(\"\\n\\t\\t\\tSQDataset.apply_augmentations: NOW APPLYING band-stop_filter AUGMENTATION\")\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: curr_augmentation_val == \", curr_augmentation_val)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: curr_augmentation_val/self.BW == \", curr_augmentation_val/self.BW)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: self.SFREQ == \", self.SFREQ)\r\n b, a = iirnotch(curr_augmentation_val, curr_augmentation_val/self.BW, self.SFREQ)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: b == \", b)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: a == \", a)\r\n x[:,j] = lfilter(b, a, x[:,j])\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n elif curr_augmentation == 'time_shift':\r\n # print(\"\\n\\t\\t\\tSQDataset.apply_augmentations: NOW APPLYING time_shift AUGMENTATION\")\r\n if curr_augmentation_val != 0:\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n new_signal = np.zeros(x[:,j].shape)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: new_signal shape == \", new_signal.shape)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: curr_augmentation_val == \", curr_augmentation_val)\r\n if curr_augmentation_val < 0:\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: new_signal[:curr_augmentation_val] shape == \", new_signal[:curr_augmentation_val].shape)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[np.abs(curr_augmentation_val):,j] shape == \", x[np.abs(curr_augmentation_val):,j].shape)\r\n new_signal[:curr_augmentation_val] = x[np.abs(curr_augmentation_val):,j]\r\n # 
print(\"\\t\\t\\tSQDataset.apply_augmentations: new_signal[curr_augmentation_val:] shape == \", new_signal[curr_augmentation_val:].shape)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:np.abs(curr_augmentation_val),j] shape == \", x[:np.abs(curr_augmentation_val),j].shape)\r\n new_signal[curr_augmentation_val:] = x[:np.abs(curr_augmentation_val),j]\r\n else:\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: new_signal[:curr_augmentation_val] shape == \", new_signal[:curr_augmentation_val].shape)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[-curr_augmentation_val:,j] shape == \", x[-curr_augmentation_val:,j].shape)\r\n new_signal[:curr_augmentation_val] = x[-curr_augmentation_val:,j]\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: new_signal[curr_augmentation_val:] shape == \", new_signal[curr_augmentation_val:].shape)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:-curr_augmentation_val,j] shape == \", x[:-curr_augmentation_val,j].shape)\r\n new_signal[curr_augmentation_val:] = x[:-curr_augmentation_val,j]\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: new_signal shape == \", new_signal.shape)\r\n x[:,j] = new_signal\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[:,j] shape == \", x[:,j].shape)\r\n # else:\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: curr_augmentation_val == 0 -> SKIPPING\")\r\n elif curr_augmentation == 'zero-masking': \r\n # print(\"\\n\\t\\t\\tSQDataset.apply_augmentations: NOW APPLYING zero-masking AUGMENTATION\")\r\n # print(\"\\t\\t\\tSQDataset.apply_augmentations: x[curr_augmentation_val[1]:curr_augmentation_val[1]+curr_augmentation_val[0], j] shape == \", x[curr_augmentation_val[1]:curr_augmentation_val[1]+curr_augmentation_val[0], j].shape)\r\n x[curr_augmentation_val[1]:curr_augmentation_val[1]+curr_augmentation_val[0], j] = 0.\r\n else:\r\n raise NotImplementedError(\"curr_augmentation == \"+str(curr_augmentation)+\" not recognized for application\")\r\n \r\n # print(\"SQDataset.apply_augmentations: x shape == \", x.shape)\r\n return x", "def data_augmentation(input_image_list,\n segmentation_image_list=None,\n number_of_simulations=10,\n reference_image=None,\n transform_type='affineAndDeformation',\n noise_model='additivegaussian',\n noise_parameters=(0.0, 0.05),\n sd_simulated_bias_field=0.05,\n sd_histogram_warping=0.05,\n output_numpy_file_prefix=None,\n verbose=False\n ):\n\n from ..utilities import histogram_warp_image_intensities\n from ..utilities import simulate_bias_field\n\n if reference_image is None:\n reference_image = input_image_list[0][0]\n\n number_of_modalities = len(input_image_list[0])\n\n # Set up numpy arrays if outputing to file.\n\n batch_X = None\n batch_Y = None\n if output_numpy_file_prefix is not None:\n batch_X = np.zeros((number_of_simulations, *reference_image.shape, number_of_modalities))\n if segmentation_image_list is not None:\n batch_Y = np.zeros((number_of_simulations, *reference_image.shape))\n\n # Spatially transform input image data\n\n if verbose:\n print(\"Randomly spatially transforming the image data.\")\n\n transform_augmentation = antspynet.randomly_transform_image_data(reference_image,\n input_image_list=input_image_list,\n segmentation_image_list=segmentation_image_list,\n number_of_simulations=number_of_simulations,\n transform_type=transform_type,\n sd_affine=0.01,\n deformation_transform_type=\"bspline\",\n number_of_random_points=1000,\n sd_noise=2.0,\n 
number_of_fitting_levels=4,\n mesh_size=1,\n sd_smoothing=4.0,\n input_image_interpolator='linear',\n segmentation_image_interpolator='nearestNeighbor')\n\n simulated_image_list = list()\n simulated_segmentation_image_list = list()\n\n for i in range(number_of_simulations):\n\n if verbose:\n print(\"Processing simulation \" + str(i))\n\n segmentation = None\n if segmentation_image_list is not None:\n segmentation = transform_augmentation['simulated_segmentation_images'][i]\n simulated_segmentation_image_list.append(segmentation)\n if batch_Y is not None:\n if reference_image.dimension == 2:\n batch_Y[i, :, :] = segmentation.numpy()\n else:\n batch_Y[i, :, :, :] = segmentation.numpy()\n\n\n for j in range(number_of_modalities):\n\n simulated_local_image_list = list()\n\n if verbose:\n print(\" Modality \" + str(j))\n\n image = transform_augmentation['simulated_images'][i][j]\n image_range = image.range()\n\n # Normalize to [0, 1] before applying augmentation\n\n if verbose:\n print(\" Normalizing to [0, 1].\")\n\n image = ants.iMath(image, \"Normalize\")\n\n # Noise\n\n if noise_model is not None:\n\n if verbose:\n print(\" Adding noise (\" + noise_model + \").\")\n\n if noise_model.lower() == \"additivegaussian\":\n parameters = (noise_parameters[0], random.uniform(0.0, noise_parameters[1]))\n image = ants.add_noise_to_image(image,\n noise_model=\"additivegaussian\",\n noise_parameters=parameters)\n elif noise_model.lower() == \"saltandpepper\":\n parameters = (random.uniform(0.0, noise_parameters[0]), noise_parameters[1], noise_parameters[2])\n image = ants.add_noise_to_image(image,\n noise_model=\"saltandpepper\",\n noise_parameters=parameters)\n elif noise_model.lower() == \"shot\":\n parameters = (random.uniform(0.0, noise_parameters[0]))\n image = ants.add_noise_to_image(image,\n noise_model=\"shot\",\n noise_parameters=parameters)\n elif noise_model.lower() == \"speckle\":\n parameters = (random.uniform(0.0, noise_parameters[0]))\n image = ants.add_noise_to_image(image,\n noise_model=\"speckle\",\n noise_parameters=parameters)\n else:\n raise ValueError(\"Unrecognized noise model.\")\n\n\n # Simulated bias field\n\n if sd_simulated_bias_field > 0:\n\n if verbose:\n print(\" Adding simulated bias field.\")\n\n bias_field = antspynet.simulate_bias_field(image,\n sd_bias_field=sd_simulated_bias_field)\n image = image * (bias_field + 1)\n\n # Histogram intensity warping\n\n if sd_histogram_warping > 0:\n\n if verbose:\n print(\" Performing intensity histogram warping.\")\n\n break_points = [0.2, 0.4, 0.6, 0.8]\n displacements = list()\n for b in range(len(break_points)):\n displacements.append(random.gauss(0, sd_histogram_warping))\n image = antspynet.histogram_warp_image_intensities(image,\n break_points=break_points,\n clamp_end_points=(False, False),\n displacements=displacements)\n\n # Rescale to original intensity range\n\n if verbose:\n print(\" Rescaling to original intensity range.\")\n\n image = ants.iMath(image, \"Normalize\") * (image_range[1] - image_range[0]) + image_range[0]\n\n simulated_local_image_list.append(image)\n\n if batch_X is not None:\n if reference_image.dimension == 2:\n batch_X[i, :, :, j] = image.numpy()\n else:\n batch_X[i, :, :, :, j] = image.numpy()\n\n\n simulated_image_list.append(simulated_local_image_list)\n\n if batch_X is not None:\n if verbose:\n print(\"Writing images to numpy array.\")\n np.save(output_numpy_file_prefix + \"SimulatedImages.npy\", batch_X)\n if batch_Y is not None:\n if verbose:\n print(\"Writing segmentation images to numpy 
array.\")\n np.save(output_numpy_file_prefix + \"SimulatedSegmentationImages.npy\", batch_Y)\n\n if segmentation_image_list is None:\n return({'simulated_images' : simulated_image_list})\n else:\n return({'simulated_images' : simulated_image_list,\n 'simulated_segmentation_images' : simulated_segmentation_image_list})", "def __augmented_images(self, info, start):\n count = start\n final_img_to_save = []\n for pair in info:\n processedImage = self.__processImage(os.path.join(WORKING_DIR, pair[0]))\n if processedImage == None:\n continue\n # translation is not that important since CNNs are resistant to image translations\n rotatedImages = self.__applyRotations(processedImage)\n\n rotCount = 1\n for img in rotatedImages:\n filename = str(count) + \"_\" + str(rotCount) + \".jpg\"\n # img.save(os.path.join(directory, filename))\n final_img_to_save.append((img, pair[1], filename))\n rotCount += 1\n\n print(\"Augmenting image: {:05}\".format(count))\n count += 1\n return final_img_to_save", "def run_example_augmentations():\n parser = argparse.ArgumentParser(description='Visualise example augmentations')\n parser.add_argument('--dataDir', type=str, required=True,\n help='Directory containing training data stored in the expected format. See dataset_cvppp.py')\n parser.add_argument('--outputDir', type=str, required=True,\n help='Directory to save example images to')\n parser.add_argument('--numImages', type=int, default=30,\n help='How many images to save')\n parser.add_argument('--blurImages', dest='blurImages', action='store_true')\n parser.add_argument('--dontBlurImages', dest='blurImages', action='store_false')\n parser.set_defaults(blurImages=False)\n\n args = parser.parse_args()\n\n # Create output dir\n assert not os.path.isdir(args.outputDir), \"output dir already exists\"\n os.mkdir(args.outputDir)\n\n # # Init dataset\n train_dataset = dataset_cvppp.CVPPP_Dataset()\n \n train_dataset.load_cvppp(args.dataDir, 'train')\n train_dataset.prepare()\n\n # Init config\n configuration = config_cvppp.TrainConfig()\n\n # Init augmentation\n augmentation = get_augmentation_sequence()\n\n # Generate images\n for i in range(args.numImages):\n image, meta, class_ids, bbox, mask = model.load_image_gt(train_dataset, configuration, i, augmentation=augmentation)\n\n rgb_mask = mask_to_rgb(mask)\n\n im_path = os.path.join(args.outputDir, str(i) + '_image.png')\n mask_path = os.path.join(args.outputDir, str(i) + '_mask.png')\n io.imsave(im_path, image)\n io.imsave(mask_path, rgb_mask)\n\n print(\"Saved example\", i)", "def augmentAffine(img_in, seg_in, strength=0.05):\n B,C,D,H,W = img_in.size()\n affine_matrix = (torch.eye(3,4).unsqueeze(0) + torch.randn(B, 3, 4) * strength).to(img_in.device)\n\n meshgrid = F.affine_grid(affine_matrix,torch.Size((B,1,D,H,W)))\n\n img_out = F.grid_sample(img_in, meshgrid,padding_mode='border')\n seg_out = F.grid_sample(seg_in.float().unsqueeze(1), meshgrid, mode='nearest').long().squeeze(1)\n\n return img_out, seg_out", "def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:\n max_kernel = int(round(0.1 * window_radius))\n max_hole_size = int(round(0.1 * window_radius))\n additional_targets = [ADDITIONAL_TARGETS_KEY.format(idx) for idx in range(1, num_features)]\n\n return albumentations.Compose(\n [\n # The augmentations assume an image is RGB between 0 and 1\n albumentations.ToFloat(max_value=255, always_apply=True, p=1.0),\n # These augmentations should be order independent, toss 'em up front\n 
albumentations.Flip(p=0.5),\n albumentations.Transpose(p=0.5),\n albumentations.Rotate(limit=90, p=0.5),\n # Fogging as it's quite similar to top-down cloud effects, seems reasonable to apply up front\n albumentations.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.8, alpha_coef=0.08, p=0.5),\n # Color modifications\n albumentations.OneOf(\n [\n albumentations.RandomBrightnessContrast(\n brightness_limit=0.2, contrast_limit=0.6, brightness_by_max=True, p=1.0\n ),\n albumentations.RGBShift(r_shift_limit=0.2, g_shift_limit=0.2, b_shift_limit=0.2, p=1.0),\n ],\n p=0.25,\n ),\n # Distortions\n albumentations.OneOf(\n [\n albumentations.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, p=1.0),\n albumentations.GridDistortion(num_steps=5, distort_limit=0.4, p=1.0),\n albumentations.OpticalDistortion(distort_limit=0.1, shift_limit=0.1, p=1.0),\n ],\n p=0.25,\n ),\n albumentations.GaussianBlur(blur_limit=max_kernel, p=0.25),\n # Noise\n albumentations.OneOf(\n [\n albumentations.CoarseDropout(\n max_holes=8, max_height=max_hole_size, max_width=max_hole_size, fill_value=np.nan, p=1.0\n ),\n albumentations.GaussNoise(var_limit=0.05, mean=0, p=1.0),\n ],\n p=0.25,\n ),\n # Scaling, adding last so that other augmentations are applied at a consistent resolution\n albumentations.RandomScale(scale_limit=0.05, p=0.25),\n # Augmentations may not return images of the same size, images can be both smaller and larger than expected, so\n # these two augmentations are added to keep things consistent\n albumentations.PadIfNeeded(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),\n albumentations.CenterCrop(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),\n # Return the data to its original scale\n albumentations.FromFloat(max_value=255, always_apply=True, p=1.0),\n ],\n p=1.0,\n additional_targets={target: \"image\" for target in additional_targets},\n )", "def img_and_key_point_augmentation(augmentation, img, bbox, key_points):\n\n # img_copy = img.copy()\n image_shape = img.shape\n h, w = image_shape[0:2]\n\n # Convert the stochastic sequence of augmenters to a deterministic one.\n # The deterministic sequence will always apply the exactly same effects to the images.\n det = augmentation.to_deterministic()\n\n ia_bbox = list()\n for bounding_box in bbox:\n x1, y1, x2, y2 = bounding_box\n ia_bbox.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2))\n\n bbs = ia.BoundingBoxesOnImage(ia_bbox, shape=image_shape)\n bbs_aug = det.augment_bounding_boxes([bbs])[0]\n # img = bbs_aug.draw_on_image(img)\n\n after_bbox = list()\n for bounding_box in bbs_aug.bounding_boxes:\n bbox_list = [bounding_box.x1_int, bounding_box.y1_int, bounding_box.x2_int, bounding_box.y2_int]\n\n if bbox_list[0] >= w: bbox_list[0] = w - 1\n if bbox_list[1] >= h: bbox_list[1] = h - 1\n if bbox_list[2] >= w: bbox_list[2] = w - 1\n if bbox_list[3] >= h: bbox_list[3] = h - 1\n\n if bbox_list[0] == bbox_list[2] or bbox_list[1] == bbox_list[3]:\n return img_and_key_point_augmentation(augmentation, img, bbox, key_points)\n\n bbox_list = list(map(lambda x: max(x, 0), bbox_list))\n after_bbox.append(bbox_list)\n\n after_key_points = list()\n for key_point_list in key_points:\n after_key_point_list = list()\n for key_point in key_point_list:\n xy_points = list()\n for i, x in enumerate(key_point[::2]):\n y = key_point[(i * 2) + 1]\n xy_points.append(ia.Keypoint(x=x, y=y))\n\n keypoints_on_image = det.augment_keypoints([ia.KeypointsOnImage(xy_points, shape=image_shape)])\n # img = keypoints_on_image[0].draw_on_image(img)\n\n 
xy_points = list()\n for key_point in keypoints_on_image[0].keypoints:\n kp = [key_point.x_int, key_point.y_int]\n if 0 > min(kp) or w <= max(kp[::2]) or h <= max(kp[1::2]):\n # print(kp)\n return img_and_key_point_augmentation(augmentation, img, bbox, key_points)\n xy_points.extend(kp)\n\n after_key_point_list.append(xy_points)\n\n after_key_points.append(after_key_point_list)\n\n img_aug = det.augment_image(img)\n assert img_aug.shape == image_shape, \"Augmentation shouldn't change image size\"\n\n return img_aug, after_bbox, after_key_points", "def _augment_images(self, images, random_state, parents, hooks):\n nb_images = len(images)\n samples = self.p.draw_samples((nb_images,), random_state=random_state)\n for i in sm.xrange(nb_images):\n if samples[i] == 1:\n if self.axis == 1:\n images[i] = np.fliplr(images[i])\n elif self.axis == 0:\n images[i] = np.flipud(images[i])\n self.samples = samples\n return images", "def cifar_image_augmentation(images):\n images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)\n images = tf.random_crop(images, [32, 32, 3])\n images = tf.image.random_flip_left_right(images)\n return images", "def main():\n batch_size = 64\n nb_runs = 20\n\n # Measure time required to generate 100k augmentation matrices\n \"\"\"\n print(\"Generating 100 times 1000 augmentation matrices of size 64x64...\")\n start = time.time()\n for _ in range(100):\n create_aug_matrices(1000, 64, 64,\n scale_to_percent=1.5, scale_axis_equally=False,\n rotation_deg=20, shear_deg=20,\n translation_x_px=5, translation_y_px=5)\n print(\"Done in %.8f\" % (time.time() - start,))\n \"\"\"\n\n # Test Performance on 64 images of size 512x512 pixels\n image = data.lena()\n images = np.resize(image, (batch_size, image.shape[0], image.shape[1], image.shape[2]))\n augmenter = ImageAugmenter(image.shape[0], image.shape[1],\n hflip=True, vflip=True,\n scale_to_percent=1.3, scale_axis_equally=False,\n rotation_deg=25, shear_deg=10,\n translation_x_px=5, translation_y_px=5)\n print(\"Running tests on %d images of shape %s\" % (batch_size, str(image.shape)))\n run_tests(augmenter, images, nb_runs)\n print(\"\")\n\n print(\"Running tests on %d images of shape %s\" % (batch_size, str(image.shape)))\n print(\"(With 1000 pregenerated matrices)\")\n augmenter.pregenerate_matrices(1000)\n run_tests(augmenter, images, nb_runs)\n print(\"\")\n\n # Test Performance on 64 images of size 64x64 pixels\n image = data.lena()\n image = misc.imresize(image, (64, 64))\n images = np.resize(image, (batch_size, image.shape[0], image.shape[1], image.shape[2]))\n augmenter = ImageAugmenter(image.shape[0], image.shape[1],\n hflip=True, vflip=True,\n scale_to_percent=1.3, scale_axis_equally=False,\n rotation_deg=25, shear_deg=10,\n translation_x_px=5, translation_y_px=5)\n print(\"Running tests on %d images of shape %s\" % (batch_size, str(image.shape)))\n run_tests(augmenter, images, nb_runs)\n\n print(\"Running tests on %d images of shape %s\" % (batch_size, str(image.shape)))\n print(\"(With 1000 pregenerated matrices)\")\n augmenter.pregenerate_matrices(1000)\n run_tests(augmenter, images, nb_runs)\n print(\"\")\n\n # Time required to augment 1,000,000 images of size 32x32\n print(\"Augmenting 1000 batches of 1000 lena images (1 million total)\" \\\n \", each of size 32x32...\")\n image = data.lena()\n image = misc.imresize(image, (32, 32))\n batch_size = 1000\n images = np.resize(image, (batch_size, image.shape[0], image.shape[1], image.shape[2]))\n augmenter = ImageAugmenter(image.shape[1], image.shape[0],\n 
hflip=True, vflip=True,\n scale_to_percent=1.3, scale_axis_equally=False,\n rotation_deg=25, shear_deg=10,\n translation_x_px=5, translation_y_px=5)\n augmenter.pregenerate_matrices(1000)\n\n start = time.time()\n for _ in range(1000):\n augmenter.augment_batch(images)\n print(\"Done in %.8fs\" % (time.time() - start,))\n print(\"\")\n\n # Time required to augment 1,000,000 images of size 32x32\n # but using only one matrix without the class (no library overhead from\n # ImageAugmenter)\n # Notice that this does not include horizontal and vertical flipping,\n # which is done via numpy in the ImageAugmenter class.\n print(\"Augmenting 1000 batches of 1000 lena images (1 million total)\" \\\n \", each of size 32x32, using one matrix directly (no ImageAugmenter \" \\\n \"class)...\")\n matrices = create_aug_matrices(1, image.shape[1], image.shape[0],\n scale_to_percent=1.3, scale_axis_equally=False,\n rotation_deg=25, shear_deg=10,\n translation_x_px=5, translation_y_px=5)\n matrix = matrices[0]\n\n start = time.time()\n for _ in range(1000):\n for image in images:\n augmented_image = tf.warp(image, matrix)\n print(\"Done in %.8fs\" % (time.time() - start,))", "def __getitem__(self, idx):\n\n def load_image_mask(idx):\n img = cv2.imread(os.path.join(self.img_path, self.img_files[idx]))\n mask = cv2.imread(os.path.join(self.mask_path, self.img_files[idx]), cv2.IMREAD_GRAYSCALE)\n return img, mask\n\n # retrieve current image index and current augmentation index\n curr_img_idx, curr_augm_idx = self.__get_img_augm_idx__(idx)\n\n batch_img = []\n batch_mask = []\n\n img, mask = load_image_mask(curr_img_idx)\n batch_gen_iter = 0\n\n # generate AT MOST self.batch_size images\n\n while batch_gen_iter < self.batch_size:\n\n if curr_augm_idx < self.gen_count:\n\n # there are still augmentations to generate for current image\n # let's generate them\n\n if mask is None:\n print(f\"== WARNING: Image {self.img_files[curr_img_idx]}\" +\n f\"does not have corresponding mask in \\\"{self.mask_path}\\\"; skipping ==\")\n\n else:\n crop_res = self.crop_compose(image=img, mask=mask)\n augm_img, augm_mask = crop_res[\"image\"], crop_res[\"mask\"]\n\n if curr_augm_idx != 0 and self.augm:\n augm_res = self.augm_compose(image=augm_img, mask=augm_mask)\n augm_img, augm_mask = augm_res[\"image\"], augm_res[\"mask\"]\n\n # threshold and transform mask for NN model\n\n _, augm_mask = cv2.threshold(augm_mask, 127, 255, cv2.THRESH_BINARY)\n augm_mask = np.stack([(augm_mask == 255)], axis=-1).astype('float')\n\n # append augmented image and mask to batches\n\n batch_img.append(augm_img)\n batch_mask.append(augm_mask)\n\n curr_augm_idx += 1\n batch_gen_iter += 1\n\n else:\n\n # all augmentations for current images have been generated\n # move to next image\n\n curr_img_idx += 1\n curr_augm_idx = 0\n\n if curr_img_idx < len(self.img_files):\n img, mask = load_image_mask(curr_img_idx)\n else:\n break\n\n return np.array(batch_img), np.array(batch_mask)", "def augmentation(element: str, output: str, factor: int) -> None:\n\n out_filename = get_output_filename(element, output, -1)\n\n try:\n os.makedirs(\"/\".join(out_filename.split(\"/\")[:-1]))\n except:\n pass\n\n im = ImageOperations.load(element)\n ImageOperations.save(im, path=out_filename)\n\n for i in range(factor):\n out_filename = get_output_filename(element, output, i)\n im_aug = copy.deepcopy(im)\n for operation in set(random.sample(operations, k=random.randint(0, len(operations)))):\n im_aug = operation(im_aug)\n\n ImageOperations.save(im_aug, path=out_filename)", 
"def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n #print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2", "def apply_augmentations(self, x, y):\n augmentation_list = []\n\n if self.flip_h and choice([True, False]):\n augmentation_list.append(np.fliplr)\n if self.flip_v and choice([True, False]):\n augmentation_list.append(np.flipud)\n\n for t_fn in augmentation_list:\n x = t_fn(x)\n\n if self.mltype == 'segmentation':\n y = t_fn(y)\n\n elif self.mltype == 'object_detection':\n if t_fn is np.fliplr:\n y = flip_labels_horizontal(self.shape, y)\n elif t_fn is np.flipud:\n y = flip_labels_vertical(self.shape, y)\n \n return x, y", "def adjust(self, image):\n ...", "def generate_transformations(self):\n if self.perform_aug:\n print(\"\\nAugmentation will be applied to the training images\")\n data_transforms = {\n \"train\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)),\n transforms.RandomRotation(degrees=45),\n transforms.ColorJitter(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),\n \"val\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)), # 256 used to be\n transforms.CenterCrop(self.input_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n }\n else:\n print(\"\\nNo augmentation will be applied to the training images\")\n data_transforms = {\n \"train\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),\n \"val\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)), # 256 used to be\n transforms.CenterCrop(self.input_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n }\n\n return data_transforms", "def get_validation_augmentation():\n test_transform = [\n A.PadIfNeeded(min_height=512, min_width=512, always_apply=True, border_mode=0),\n A.Resize(height = RESIZE, width = RESIZE, interpolation=1, always_apply=True, p=1)\n ]\n return A.Compose(test_transform)", "def __call__(self, video_sequence):\n for aug_op in self.augmentations:\n video_sequence = aug_op(video_sequence)\n return video_sequence", "def preprocess(self, img):\n return img - np.mean(img)", "def get_validation_augmentation():\n test_transform = [\n A.PadIfNeeded(480, 864)\n ]\n return A.Compose(test_transform)", "def augmentedData(trainingData):\n datagen = ImageDataGenerator (\n width_shift_range=0.075,\n height_shift_range=0.075,\n rotation_range=12,\n shear_range=0.075,\n zoom_range=0.05,\n fill_mode='constant',\n cval=0\n )\n\n datagen.fit(trainingData)\n return datagen", "def transform(self, previousimage):", "def load_image_gt(dataset, config, image_id, augmentation=None):\n # Load image and mask\n image = dataset.load_image(image_id)\n global_mask, mask, class_ids, class_ids2, text_embeds, embed_lengths = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n 
min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n # TODO\n # global_mask = utils.resize_mask(global_mask, scale, padding, crop)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation and dataset.image_info[image_id]['source'] not in config.NO_AUGMENT_SOURCES:\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # global_mask = det.augment_image(global_mask)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n if config.SOFT_MASK:\n mask *= 255\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n global_mask = det.augment_image(global_mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n \n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n if not config.SOFT_MASK:\n mask = mask.astype(np.bool)\n global_mask = global_mask.astype(np.bool)\n else:\n mask = np.array(mask/255., np.float32)\n\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n \n class_ids = class_ids[_idx]\n class_ids2 = class_ids2[_idx]\n # NOTE NOTE NOTE if label2 is derection, augmentation mast be care hare\n # ------------------------------------------------------------\n def rot90_augment(image, mask, global_mask, class_ids2):\n k = random.choice([0, 1, 2, 3])\n if k:\n image = np.rot90(image, k)\n mask = np.rot90(mask, k)\n global_mask = np.rot90(global_mask, k)\n map_dict = {1: dict(zip([0,1,2,3], [1,2,3,0])),\n 2: dict(zip([0,1,2,3], [2,3,0,1])),\n 3: dict(zip([0,1,2,3], [3,0,1,2]))}\n class_ids2 = np.array([map_dict[k][i] for i in class_ids2])\n return image, mask, global_mask, class_ids2\n image, mask, global_mask, class_ids2 = rot90_augment(image, mask, global_mask, class_ids2)\n text_embeds = text_embeds[_idx]\n embed_lengths = embed_lengths[_idx]\n # Bounding boxes. 
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox, mask_score = utils.extract_bboxes(mask)\n rbbox = utils.extract_minienclose_bboxes(mask)\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n # print (\"dataset.source_class_ids\", dataset.source_class_ids)\n # dataset.source_class_ids {'': [0], 'coco_label2': [0, 8, 9, 10, 11], 'coco': [0, 1, 2, 3, 4, 5, 6, 7]}\n source_class_ids2 = dataset.source_class_ids['coco_label2']\n active_class_ids[source_class_ids2[1: ]] = 1\n active_class_ids2 = active_class_ids[config.NUM_CLASSES: ]\n active_class_ids = active_class_ids[: config.NUM_CLASSES]\n \n # Resize masks to smaller size to reduce memory usage\n if config.USE_MINI_MASK:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE, softmask=config.SOFT_MASK)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids, active_class_ids2)\n\n\n return image, image_meta, class_ids, class_ids2, bbox, rbbox, global_mask, mask, mask_score, text_embeds, embed_lengths", "def _build_augmentation_map(self, images):\n aug_map = {}\n img_shape = (images[0].x.shape[0], images[0].x.shape[1])\n\n vert_modes = [Crop.TOP, Crop.CENTER, Crop.BOTTOM]\n horiz_modes = [Crop.LEFT, Crop.CENTER, Crop.RIGHT]\n crop_modes = flatten_list([[CropMode(vert, horiz) for horiz in horiz_modes] for vert in vert_modes])\n\n labels_series = pd.Series([i.y for i in images])\n labels_distribution = labels_series.value_counts(normalize=True).sort_values()\n\n min_distribution = labels_distribution.iloc[0] * len(crop_modes)\n\n for label, distribution in labels_distribution.iteritems():\n aug_num = math.ceil(min_distribution / distribution)\n #additional augmentation functions can be added here:\n aug_map[label] = [self._build_crop_fn(img_shape, crop_modes[:aug_num])]\n \n return aug_map", "def _augment_sample(self, image=None, label=None, as_tuple=False):\n # Use augmentation, if requested\n if label is None:\n assert(self._aug.opts['aug_labels'] is False)\n # TODO Test this code path with basic horizontal flipping\n aug_image = self._aug.augment([image], None, as_tuple)\n image = aug_image[0]\n else:\n aug_image, aug_label = self._aug.augment([image], [label], as_tuple)\n image, label = aug_image[0], aug_label[0]\n\n # Return image and label\n if as_tuple:\n return (image[0], image[1]), label\n else:\n return np.array([image[0], image[1]]), label", "def augment(self, image):\n\n contrast = ImageEnhance.Contrast(image)\n return contrast.enhance(self._contrast)", "def augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.):\n pil_img = Image.fromarray(image)\n\n ws = np.float32(\n np.random.dirichlet([alpha] * width))\n m = np.float32(np.random.beta(alpha, alpha))\n\n mix = np.zeros_like(image, dtype=np.float32)\n for i in range(width):\n image_aug = pil_img.copy()\n depth = depth if depth > 0 else np.random.randint(1, 4)\n for _ in range(depth):\n op = np.random.choice(augmentations.augmentations)\n image_aug = op(image_aug, severity)\n # Preprocessing commutes since all coefficients are convex\n mix += ws[i] * np.asarray(image_aug, dtype=np.float32)\n\n mixed = (1 - m) * image + m * mix\n\n 
return mixed", "def get_validation_augmentation():\r\n test_transform = [\r\n albu.Resize(192, 192)\r\n ]\r\n return albu.Compose(test_transform)", "def __enhance_image(self, img):\n\n blue = self.g.clahe.apply(img[:,:,0])\n green = self.g.clahe.apply(img[:,:,1])\n red = self.g.clahe.apply(img[:,:,2])\n img[:,:,0] = blue\n img[:,:,1] = green\n img[:,:,2] = red\n return img", "def augment(self, directory, crop_ratio=0.75):\n aug_dir = os.path.join(directory, self.AUGMENTED_DIR)\n \n if os.path.exists(aug_dir):\n clear_dir(aug_dir)\n else:\n create_dirs([aug_dir])\n\n images = self._load_dir(directory)\n\n aug_map = self._build_augmentation_map(images)\n\n for i in images:\n for transformation in aug_map[i.y]:\n transformation(i, aug_dir)", "def __call__(self, in_data):\n # There are five data augmentation steps\n # 1. Color augmentation\n # 2. Random expansion\n # 3. Random cropping\n # 4. Resizing with random interpolation\n # 5. Random horizontal flipping\n if self.count % 10 == 0 and self.count % self.batchsize == 0 and self.count != 0:\n self.i += 1\n i = self.i % len(self.dim)\n self.output_shape = (self.dim[i], self.dim[i])\n # print(self.count, self.i, self.output_shape)\n self.count += 1\n\n img, bbox, label = in_data\n\n # 1. Color augmentation\n img = random_distort(img, brightness_delta=32,\n contrast_low=0.5, contrast_high=1.5,\n saturation_low=0.5, saturation_high=1.5,\n hue_delta=25)\n\n # Normalize. range is [0, 1]\n img /= 255.0\n\n _, H, W = img.shape\n scale = np.random.uniform(0.25, 2)\n random_expand = np.random.uniform(0.8, 1.2, 2)\n net_h, net_w = self.output_shape\n out_h = net_h * scale # random_expand[0]\n out_w = net_w * scale # random_expand[1]\n if H > W:\n out_w = out_h * (float(W) / H) * np.random.uniform(0.8, 1.2)\n elif H < W:\n out_h = out_w * (float(H) / W) * np.random.uniform(0.8, 1.2)\n\n out_h = int(out_h)\n out_w = int(out_w)\n\n img = resize_with_random_interpolation(img, (out_h, out_w))\n bbox = transforms.resize_bbox(bbox, (H, W), (out_h, out_w))\n\n if out_h < net_h and out_w < net_w:\n img, param = expand(img, out_h=net_h, out_w=net_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n else:\n out_h = net_h if net_h > out_h else int(out_h * 1.05)\n out_w = net_w if net_w > out_w else int(out_w * 1.05)\n img, param = expand(img, out_h=out_h, out_w=out_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n\n img, param = crop_with_bbox_constraints(\n img, bbox, return_param=True,\n crop_height=net_h, crop_width=net_w)\n bbox, param = transforms.crop_bbox(\n bbox, y_slice=param['y_slice'], x_slice=param['x_slice'],\n allow_outside_center=False, return_param=True)\n label = label[param['index']]\n\n\n # 5. 
Random horizontal flipping # OK\n img, params = transforms.random_flip(\n img, x_random=True, return_param=True)\n bbox = transforms.flip_bbox(\n bbox, self.output_shape, x_flip=params['x_flip'])\n\n # Preparation for Yolov2 network\n bbox[:, ::2] /= self.output_shape[0] # y\n bbox[:, 1::2] /= self.output_shape[1] # x\n\n num_bbox = len(bbox)\n len_max = max(num_bbox, self.max_target)\n\n gmap = create_map_anchor_gt(bbox, self.anchors, self.output_shape,\n self.downscale, self.n_boxes, len_max)\n\n out_bbox = np.zeros((len_max, 4), dtype='f')\n out_bbox[:num_bbox] = bbox[:num_bbox]\n out_label = np.zeros((len_max), dtype='i')\n out_label[:num_bbox] = label\n\n gmap = gmap[:self.max_target]\n out_bbox = out_bbox[:self.max_target]\n out_label = out_label[:self.max_target]\n num_array = min(num_bbox, self.max_target)\n\n img = np.clip(img, 0, 1)\n return img, out_bbox, out_label, gmap, np.array([num_array], dtype='i')", "def augment_images(images, measurements, correction=0.0):\r\n aug_imgs, aug_msrs = [], []\r\n for image, measurement, in zip(images, measurements):\r\n corr_msr = measurement + correction\r\n aug_imgs.append(image)\r\n aug_msrs.append(corr_msr)\r\n aug_imgs.append(cv2.flip(image, 1))\r\n aug_msrs.append(corr_msr*-1)\r\n return aug_imgs, aug_msrs", "def augment(self, face_img: np.ndarray, nface_img: np.ndarray, leye_img: np.ndarray, reye_img: np.ndarray,\n lndmks: np.ndarray , y: np.ndarray=None, keep: bool=False, last_state: dict=None):\n\n # Check if we use existing augmentations or we have to compute new ones\n if keep:\n assert(set(last_state) == set(self.last_state))\n else:\n last_state = dict(self.last_state)\n\n # Vertical flip\n if self.dict['vertical_flip']:\n if (not keep and np.random.random() < self.flip_prob) \\\n or (keep and last_state['vertical_flip']):\n face_img = flip_axis(face_img, 0)\n nface_img = flip_axis(nface_img, 0)\n leye_img = flip_axis(leye_img, 0)\n reye_img = flip_axis(reye_img, 0)\n lndmks[:, 1] = - lndmks[:, 1]\n y[1] = -y[1]\n last_state['vertical_flip'] = True\n else:\n last_state['vertical_flip'] = False\n\n # Horizontal flip\n if self.dict['horizontal_flip']:\n if (not keep and np.random.random() < self.flip_prob) \\\n or (keep and last_state['horizontal_flip']):\n face_img = flip_axis(face_img, 1)\n nface_img = flip_axis(nface_img, 1)\n leye_img_t = flip_axis(leye_img, 1) # change left-right order\n reye_img_t = flip_axis(reye_img, 1)\n leye_img = reye_img_t\n reye_img = leye_img_t\n lndmks[:, 0] = - lndmks[:, 0]\n y[0] = -y[0]\n last_state['horizontal_flip'] = True\n else:\n last_state['horizontal_flip'] = False\n\n # Rotation\n if keep:\n theta = last_state['rotation']\n else:\n if self.dict['rotation_range'] > 0. and np.random.random() < self.rotation_prob:\n theta = np.pi / 180 * np.random.uniform(-self.dict['rotation_range'], self.dict['rotation_range'])\n else:\n theta = 0\n last_state['rotation'] = theta\n\n # Translation in y (in pixels)\n if keep:\n tx = last_state['shift_x']\n else:\n if self.dict['height_shift_range'] > 0. and np.random.random() < self.shift_prob:\n tx = np.random.uniform(-self.dict['height_shift_range'], self.dict['height_shift_range'])\n else:\n tx = 0\n last_state['shift_x'] = tx\n\n # Translation in x (in pixels)\n if keep:\n ty = last_state['shift_y']\n else:\n if self.dict['width_shift_range'] > 0. 
and np.random.random() < self.shift_prob:\n ty = np.random.uniform(-self.dict['width_shift_range'], self.dict['width_shift_range'])\n else:\n ty = 0\n last_state['shift_y'] = ty\n\n # Zoom\n if keep:\n z = last_state['zoom']\n else:\n if self.dict['zoom_range'][0] != 1 and self.dict['zoom_range'][1] != 1 and \\\n np.random.random() < self.zoom_prob:\n z = np.random.uniform(self.dict['zoom_range'][0], self.dict['zoom_range'][1])\n else:\n z = 1\n last_state['zoom'] = z\n\n # Apply composition of transformations\n transform_matrix = None\n if theta != 0:\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n transform_matrix = rotation_matrix\n\n if tx != 0 or ty != 0:\n shift_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)\n\n if z != 1:\n zoom_matrix = np.array([[z, 0, 0],\n [0, z, 0],\n [0, 0, 1]])\n transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)\n\n if transform_matrix is not None:\n face_img = apply_transform_matrix(self, face_img, transform_matrix)\n nface_img = apply_transform_matrix(self, nface_img, transform_matrix)\n leye_img = apply_transform_matrix(self, leye_img, transform_matrix)\n reye_img = apply_transform_matrix(self, reye_img, transform_matrix)\n\n # Illumination\n if self.dict['illumination_range'][0] > 0. and self.dict['illumination_range'][0] != 1 \\\n and self.dict['illumination_range'][1] != 1:\n if not keep and np.random.random() < self.illumination_prob:\n [face_img, nface_img, leye_img, reye_img], last_state['illumination'] = \\\n modify_illumination([face_img, nface_img, leye_img, reye_img], self.dict['illumination_range'])\n elif keep and last_state['illumination'] != 1:\n [face_img, nface_img, leye_img, reye_img], last_state['illumination'] = \\\n modify_illumination([face_img, nface_img, leye_img, reye_img], self.dict['illumination_range'],\n last_state['illumination'])\n\n # Additive gaussian noise\n if self.dict['gaussian_noise_range'][1] > 0.:\n if not keep and np.random.random() < self.gaussian_noise_prob:\n [face_img, nface_img, leye_img, reye_img], last_state['gauss_var'], last_state['gauss_noise'] = \\\n add_gaussian_noise([face_img, nface_img, leye_img, reye_img], self.dict['gaussian_noise_range'])\n elif keep and last_state['gauss_noise'] != []:\n [face_img, nface_img, leye_img, reye_img], last_state['gauss_var'], last_state['gauss_noise'] = \\\n add_gaussian_noise([face_img, nface_img, leye_img, reye_img], self.dict['gaussian_noise_range'],\n last_state['gauss_var'], last_state['gauss_noise'])\n\n return [face_img, nface_img, leye_img, reye_img, lndmks], y, last_state", "def augment_brightness_camera_images(image):\n\n # The HSV - Hue Saturation Value representation converts the image from RGB space to HSV space\n # where the Value(brightness) represents the brightness that is randomly increased\n\n image1 = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n random_bright = .25+np.random.uniform()\n #print(random_bright)\n image1[:,:,2] = image1[:,:,2]*random_bright\n image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)\n return image1", "def get_validation_augmentation():\n test_transform = [\n albu.PadIfNeeded(384, 480)\n ]\n return albu.Compose(test_transform)", "def augmented(self, aug):\n out = getcopy(self)\n out.augment(aug)\n return out", "def load_image_gt(dataset, config, image_id, augmentation=None):\n # Load image and mask\n 
image = dataset.load_image(image_id)\n global_mask, mask, class_ids, class_ids2, text_embeds, embed_lengths = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n # TODO\n # global_mask = utils.resize_mask(global_mask, scale, padding, crop)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # global_mask = det.augment_image(global_mask)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n if config.SOFT_MASK:\n mask *= 255\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n global_mask = det.augment_image(global_mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n \n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n if not config.SOFT_MASK:\n mask = mask.astype(np.bool)\n global_mask = global_mask.astype(np.bool)\n else:\n mask = np.array(mask/255., np.float32)\n\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n \n class_ids = class_ids[_idx]\n class_ids2 = class_ids2[_idx]\n # NOTE NOTE NOTE if label2 is derection, augmentation mast be care hare\n # ------------------------------------------------------------\n def rot90_augment(image, mask, global_mask, class_ids2):\n k = np.random.choice([0, 1, 2, 3])\n if k:\n image = np.rot90(image, k)\n mask = np.rot90(mask, k)\n global_mask = np.rot90(global_mask, k)\n map_dict = {1: dict(zip([0,1,2,3], [1,2,3,0])),\n 2: dict(zip([0,1,2,3], [2,3,0,1])),\n 3: dict(zip([0,1,2,3], [3,0,1,2]))}\n class_ids2 = np.array([map_dict[k][i] for i in class_ids2])\n return image, mask, global_mask, class_ids2\n image, mask, global_mask, class_ids2 = rot90_augment(image, mask, global_mask, class_ids2)\n text_embeds = text_embeds[_idx]\n embed_lengths = embed_lengths[_idx]\n # Bounding boxes. 
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox, mask_score = utils.extract_bboxes(mask)\n rbbox = utils.extract_minienclose_bboxes(mask)\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n # print (\"dataset.source_class_ids\", dataset.source_class_ids)\n # dataset.source_class_ids {'': [0], 'coco_label2': [0, 8, 9, 10, 11], 'coco': [0, 1, 2, 3, 4, 5, 6, 7]}\n source_class_ids2 = dataset.source_class_ids['coco_label2']\n active_class_ids[source_class_ids2[1: ]] = 1\n active_class_ids2 = active_class_ids[config.NUM_CLASSES: ]\n active_class_ids = active_class_ids[: config.NUM_CLASSES]\n \n # Resize masks to smaller size to reduce memory usage\n if config.USE_MINI_MASK:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE, softmask=config.SOFT_MASK)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids, active_class_ids2)\n\n\n return image, image_meta, class_ids, class_ids2, bbox, rbbox, global_mask, mask, mask_score, text_embeds, embed_lengths", "def bulk_augment_images(input_path, output_path, extension, augmentation, label_type, label_threshold=-1):\n for dir_path, dir_names, filenames in os.walk(input_path):\n structure = os.path.join(output_path, dir_path[len(input_path) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dir_path, file)\n label = get_labels([src], label_type)[0]\n if label > label_threshold:\n img = cv2.imread(src, 0)\n f_name, f_ext = os.path.splitext(file)\n if augmentation == 'flip':\n img = np.flip(img, axis=-1)\n file = f_name + \"_flipped\" + f_ext\n elif augmentation == 'original':\n file = f_name + \"_original\" + f_ext\n elif augmentation == 'rotate_crop':\n rotation = np.random.choice((-10, 10))\n img = rotate_and_crop_image(img, rotation)\n file = f_name + \"_rotated\" + f_ext\n else:\n raise ValueError(\n \"Invalid value for 'augmentation'. 
Value can be 'flip', 'original', 'rotate_crop, \"\n \"value was: {}\".format(augmentation))\n dest = os.path.join(structure, file)\n cv2.imwrite(dest, img)", "def enhance(img, window=30):\n hp = highPassFilter(img, window=window)\n tmp = grayscale(img) + laplacian(img)\n return tmp", "def augmentImageByRotation(imagePath, numRotations, originalBin, data_path):\n angles = np.linspace(0, 360, numRotations + 1, endpoint=False)[1:]\n augmentedImages = []\n rgb = cv2.imread(os.path.join(data_path, imagePath))\n dt = exrToNumpy(os.path.join(os.path.dirname(os.path.join(data_path, imagePath)), 'liver_0_dt.exr'))\n dl = exrToNumpy(os.path.join(os.path.dirname(os.path.join(data_path, imagePath)), 'liver_0_dl.exr'))\n newRatings = open(new_ratings_file_path, 'a')\n generated_images = 0\n for i, angle in enumerate(angles):\n # try different offsets if exact rotation does not give the same bin as the original image\n offsets = np.linspace(0, 10, 100, endpoint=False)\n newBin = None\n save_version = False\n for offset in offsets:\n rgb_r, dt_r, dl_r = rotate_image(rgb, dt, dl, angle + offset)\n # rate image\n rating, _ = rate_tumordistance_depth.rateImage(None, None, None, num_tumors, images=[rgb_r, dt_r, dl_r])\n newBin = getBinFromRating(rating, num_bins)\n # if bins match, save image\n if originalBin == newBin:\n save_version = True\n break\n if save_version:\n rotDir = os.path.join(augmentedDataPath, os.path.dirname(imagePath) + \"_rot\" + str(i))\n os.makedirs(rotDir)\n # save images to rotDir\n rgb_path = os.path.join(rotDir, 'liver_0.png')\n dt_path = os.path.join(rotDir, 'liver_0_dt.exr')\n dl_path = os.path.join(rotDir, 'liver_0_dl.exr')\n cv2.imwrite(rgb_path, rgb_r)\n save_exr_from_numpy(dt_path, dt_r)\n save_exr_from_numpy(dl_path, dl_r)\n # make entry in new ratings file\n save_path = os.path.relpath(rgb_path, data_path)\n newRatings.write(getRatingsLine(save_path, rating))\n generated_images += 1\n newRatings.close()\n if generated_images == 0:\n print \"Could not match bins. 
(\" + imagePath + \")\"\n return generated_images", "def parse_function_augment(example_proto):\r\n\r\n\t# Parse through features and extract byte string\r\n\tparsed_features = tf.parse_single_example(example_proto,features ={\r\n\t\t'image': tf.FixedLenFeature([],tf.string),\r\n\t\t'joint': tf.FixedLenFeature([],tf.string),\r\n\t\t'offset': tf.FixedLenFeature([],tf.string),\r\n\t\t'handScale': tf.FixedLenFeature([],tf.string)\r\n\t\t},name='features')\r\n\r\n\t# Decode content into correct types\r\n\timage_dec = tf.decode_raw(parsed_features['image'],tf.float32)\r\n\tjoint_dec = tf.decode_raw(parsed_features['joint'],tf.float32)\r\n\toffset_dec = tf.decode_raw(parsed_features['offset'],tf.float32)\r\n\thandScale_dec = tf.decode_raw(parsed_features['handScale'],tf.float32)\r\n\r\n\t# Reshape image to 176x176\r\n\timage_reshaped = tf.reshape(image_dec,[176,176,1])\r\n\r\n\t# Crop 128x128 image around COM\r\n\timage_com_cropped = tf.image.crop_to_bounding_box(image_reshaped,24,24,128,128)\r\n\r\n\t# Data Augmentation\r\n\timage_com_cropped, joint_dec, offset_dec, handScale_dec = tf.py_func(augmentation_cv,[image_com_cropped, joint_dec, offset_dec, handScale_dec],[tf.float32, tf.float32, tf.float32, tf.float32])\r\n\timage_com_cropped = tf.reshape(image_com_cropped,[128,128,1])\r\n\r\n\t# TF IMPLEMENTATION OF DATA AUGMENTATION: MIGHT BE SLOWER WHEN TF IS NOT COMPILED FROM SOURCE\r\n\t# image_reshaped, joint_dec, offset_dec, handScale_dec = augmentation(image_reshaped, joint_dec, offset_dec, handScale_dec)\r\n\r\n\treturn image_com_cropped, joint_dec, offset_dec, handScale_dec", "def viz_sample_overlayed_seg_augmentations(X, Y, aug_func, colormap, n_images=5, n_per_image=5, saveto=None):\n X = X[:n_images]\n Y = Y[:n_images]\n gx = []\n gy = []\n\n # Perform Augmentations\n for col in range(n_per_image):\n x, y = aug_func(X, Y)\n gx.append(x)\n gy.append(y)\n\n # Put into a grid\n _, height, width, n_channels = X.shape\n gx = np.array(gx, dtype=np.uint8).reshape(n_images*n_per_image, height, width, n_channels)\n gy = np.array(gy, dtype=np.uint8).reshape(n_images*n_per_image, height, width)\n gx = batch2grid(gx, n_per_image, n_images)\n gy = batch2grid(gy, n_per_image, n_images)\n\n # Overlay labels on top of image\n return viz_overlayed_segmentation_label(img=gx, label=gy, colormap=colormap, saveto=saveto)", "def cmp_data_aug_image(train_dataset, train_dir):\n target_class = random.choice(train_dataset.class_names)\n target_dir = train_dir + '/' + target_class\n random_image = random.choice(os.listdir(target_dir))\n random_image_path = target_dir + '/' + random_image\n print(random_image_path)\n\n # Read and plot in the random image\n img = mpimg.imread(random_image_path)\n plt.imshow(img)\n plt.title(f\"Original Image from class: {target_class}\")\n plt.axis(False)\n\n # Now let's plot our augmented random image\n augmented_img = data_augmentation(tf.expand_dims(img, axis=0))\n plt.figure()\n plt.imshow(tf.squeeze(augmented_img/255.)) #Invalid shape (1, 553, 440, 3) for image data - squeezed after getting this error\n plt.title(f\"Augmented Image from class: {target_class}\")\n plt.axis(False)", "def get_validation_augmentation():\n test_transform = [albu.PadIfNeeded(384, 480), albu.PadIfNeeded(None, None, 32, 32)]\n return albu.Compose(test_transform)" ]
[ "0.75550324", "0.7301866", "0.72235656", "0.7217813", "0.72143877", "0.7079705", "0.6807167", "0.6775062", "0.67542374", "0.6687042", "0.6650061", "0.6619291", "0.6592331", "0.6575989", "0.65661603", "0.6518033", "0.64495766", "0.6393832", "0.63352853", "0.6332074", "0.62804836", "0.6274753", "0.626837", "0.62564754", "0.6256198", "0.6248954", "0.62435496", "0.623712", "0.6229834", "0.62279063", "0.62276", "0.6167282", "0.6163374", "0.61554605", "0.61398673", "0.60951734", "0.60891443", "0.6067485", "0.60455066", "0.60454476", "0.6020185", "0.5982973", "0.59628683", "0.59472984", "0.5946605", "0.59446895", "0.59434056", "0.59260154", "0.5920812", "0.5906044", "0.58466977", "0.58173066", "0.5813631", "0.5808042", "0.5778083", "0.5768813", "0.5754907", "0.57519984", "0.57252544", "0.5703264", "0.5693561", "0.56845033", "0.56841433", "0.5673968", "0.5666338", "0.56522775", "0.56479925", "0.56350344", "0.5632549", "0.56120145", "0.5605663", "0.56044465", "0.5586899", "0.5586187", "0.55799395", "0.55514586", "0.5548365", "0.55477023", "0.5547487", "0.5524075", "0.55007124", "0.54974777", "0.54888505", "0.54703987", "0.54685944", "0.5460845", "0.5459569", "0.545879", "0.545681", "0.5456406", "0.543904", "0.54230636", "0.5408547", "0.54072624", "0.53919953", "0.53919667", "0.5391449", "0.53903675", "0.53796285", "0.5364697" ]
0.7171862
5
Configures the target schema in which the tweets data will be stored, creating the schema and the table if they do not exist yet
def init_db(conn: Connection) -> None: logger.info(f"{Fore.YELLOW}Initializing database ...{Style.RESET_ALL}") # Create specified schema if not exists if not conn.dialect.has_schema(conn, schema_name): logger.info(f"{Fore.YELLOW}Schema {schema_name} does not exist, creating it ...{Style.RESET_ALL}") conn.execute(schema.CreateSchema(schema_name)) logger.info(f"{Fore.GREEN}Schema {schema_name} successfully created !{Style.RESET_ALL}") else: logger.info(f"{Fore.GREEN}Schema {schema_name} was found, continuing database initialization " f"...{Style.RESET_ALL}") # Create tables Base.metadata.create_all(bind=conn) logger.info(f"{Fore.GREEN}Schema {schema_name} successfully configured !{Style.RESET_ALL}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_schema(self):\n models.Base.metadata.create_all(self.session.bind)", "def set_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tweets(\n id INTEGER PRIMARY KEY,\n tweet_id INTEGER,\n insert_date TEXT,\n created_at TEXT,\n hashtag TEXT)\n \"\"\")\n\n conn.execute(\"\"\"CREATE TABLE tweet_peaks(\n peak_datetime TEXT NOT NULL,\n hashtag TEXT NOT NULL,\n time_frame TEXT,\n mean REAL,\n std REAL,\n sensibility REAL,\n freq_limit REAL,\n qt_tweets INTEGER,\n id TEXT PRIMARY KEY,\n probability REAL);\n \"\"\")", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()", "def init():\n database.create_tables([Tracker])\n database.commit()", "def create_tables():\n db.create_all()", "def create_tables():\n db.create_all()", "def setup_db():\n\n engine = config['tg.app_globals'].sa_engine\n # model.init_model(engine)\n # model.metadata.create_all(engine)", "def configure(self, config):\n # create the follower table if it doesn't already exist\n model.follower_table.create(checkfirst=True)", "def create_schema(engine):\n Base.metadata.create_all(bind=engine)", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def createTables(self):\n metadata = Base.metadata\n metadata.create_all(self._engine)\n return", "def create_staging_schema(cursor,table_schema):\n create_schema = \"CREATE SCHEMA IF NOT EXISTS \" + table_schema + \";\"\n cursor.execute(create_schema)", "def populate_table(\n user, created_at, tweet, retweet_count, id_str, my_database=DATABASE):\n\n dbconnect = connect_db(DATABASE)\n\n cursor = dbconnect.cursor()\n cursor.execute(\"USE airflowdb\")\n\n # add content here\n\n try:\n query=\"INSERT INTO tweets (user, created_at, tweet, retweet_count, id_str) VALUES (%s, %s, %s, %s, %s)\"\n \n cursor.execute(query, (user, created_at, tweet, retweet_count, id_str))\n \n dbconnect.commit()\n print(\"commited\")\n\n except mysql.Error as e:\n print(e)\n dbconnect.rollback()\n\n cursor.close()\n dbconnect.close()\n\n return", "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "def create_database_structure(self):\n Base.metadata.create_all(self.engine)", "def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])", "def create_table(self):\n pass", "def create_all_tables(self):\n pass", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def create_tables():\n db.create_all()", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def initialise(self):\n\n if self.db_type == 'sqlite':\n try:\n # Attempt to create schema if not present, to cope with fresh DB file\n 
BaseSQLite.metadata.create_all(self.engine)\n except OperationalError:\n print(\"Error creating database schema, possible invalid path? ('\" + self.db_name + \"'). Quitting\")\n exit()\n elif self.db_type == 'postgres':\n try:\n # ensure that create schema scripts created before create table scripts\n event.listen(BasePostgres.metadata, 'before_create', CreateSchema('datastore_schema'))\n BasePostgres.metadata.create_all(self.engine)\n except OperationalError:\n print(f\"Error creating database({self.db_name})! Quitting\")\n exit()", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def create_all_schemas_and_tables(self):\n for schema, tables in self.schemas.items():\n self.create_schema(schema)\n for table in tables.keys():\n self.create_table(schema, table)", "def create(self):\n c = self.cursor()\n byte_schema = pkgutil.get_data(__package__, 'schema.sql')\n c.executescript(byte_schema.decode('utf-8'))\n self.commit()", "def createDB(self):\n\n\n mycursor.execute(\"DROP TABLE tweet\")\n mycursor.execute(\"DROP TABLE follower\")\n\n mycursor.commit()\n\n createFollowerTable = \"CREATE TABLE follower (\" \\\n \"screen_name VARCHAR(255),\" \\\n \"name varchar(255),\" \\\n \"PRIMARY KEY(screen_name)\" \\\n \")\"\n\n #createTweetTable = \"CREATE TABLE tweet (\" \\\n # \"idT VARCHAR(255),\" \\\n # \"idF VARCHAR(255),\" \\\n # \"type VARCHAR(255),\" \\\n # \"content VARCHAR(140),\" \\\n # \"weight INTEGER(10),\" \\\n # \"PRIMARY KEY(idT),\" \\\n # \"FOREIGN KEY (idF) REFERENCES follower(idF)\" \\\n # \")\"\n\n mycursor.execute(createFollowerTable)\n #mycursor.execute(createTweetTable)\n\n mydb.commit()", "def bootstrap():\n Base.metadata.create_all(engine)", "def __init__(self, config_path: str = \"config.yml\", config_dict: dict = None,\n create_all: bool = True):\n\n # Prepare user_details configured in config.yml for user_details table creation\n self.config = Config(config_path, config_dict)\n user_details_list = []\n if \"twitter_user_details\" in self.config.config:\n for detail, sqldatatype in self.config.config[\"twitter_user_details\"].items():\n if sqldatatype is not None:\n user_details_list.append(detail + \" \" + sqldatatype)\n else:\n print(\"\"\"Key \"twitter_user_details\" could not be found in config.yml. 
Will not create\n a user_details table.\"\"\")\n\n # Table creation for SQLITE database type.\n # Note and TODO: the collector does not support sqlite (yet)\n if self.config.dbtype.lower() == \"sqlite\":\n try:\n self.engine = lite.connect(self.config.dbname + \".db\")\n print(\"Connected to \" + self.config.dbname + \"!\")\n except Error as e:\n raise e\n if create_all:\n try:\n create_friends_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS friends (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n burned TINYINT NOT NULL,\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP\n );\"\"\"\n create_friends_index_sql_1 = \"CREATE INDEX iFSource ON friends(source);\"\n create_friends_index_sql_2 = \"CREATE INDEX iFTimestamp ON friends(timestamp);\"\n create_results_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS result (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP\n );\"\"\"\n create_results_index_sql_1 = \"CREATE INDEX iRSource ON result(source);\"\n create_results_index_sql_2 = \"CREATE INDEX iRTimestamp ON result(timestamp);\"\n c = self.engine.cursor()\n c.execute(create_friends_table_sql)\n c.execute(create_friends_index_sql_1)\n c.execute(create_friends_index_sql_2)\n c.execute(create_results_table_sql)\n c.execute(create_results_index_sql_1)\n c.execute(create_results_index_sql_2)\n if user_details_list != []:\n create_user_details_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS user_details\n (\"\"\" + \", \".join(user_details_list) + \"\"\",\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP);\"\"\"\n create_ud_index = \"CREATE INDEX iUTimestamp ON user_details(timestamp)\"\n c.execute(create_user_details_sql)\n c.execute(create_ud_index)\n else:\n # TODO: Make this a minimal user_details table?\n print(\"\"\"No user_details configured in config.yml. Will not create a\n user_details table.\"\"\")\n except Error as e:\n print(e)\n\n # Table creation for mysql database type\n elif self.config.dbtype.lower() == \"mysql\":\n try:\n self.engine = create_engine(\n f'mysql+pymysql://{self.config.dbuser}:'\n f'{self.config.dbpwd}@{self.config.dbhost}/{self.config.dbname}'\n )\n print('Connected to database \"' + self.config.dbname + '\" via mySQL!')\n except OperationalError as e:\n raise e\n if create_all:\n try:\n create_friends_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS friends (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n burned TINYINT NOT NULL,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n ON UPDATE CURRENT_TIMESTAMP,\n UNIQUE INDEX fedge (source, target),\n INDEX(timestamp)\n );\"\"\"\n create_results_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS result (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n UNIQUE INDEX redge (source, target),\n INDEX(timestamp)\n );\"\"\"\n self.engine.execute(create_friends_table_sql)\n self.engine.execute(create_results_table_sql)\n if user_details_list != []:\n create_user_details_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS user_details\n (\"\"\" + \", \".join(user_details_list) + \"\"\", timestamp TIMESTAMP\n DEFAULT CURRENT_TIMESTAMP,\n INDEX(timestamp));\"\"\"\n self.engine.execute(create_user_details_sql)\n else:\n print(\"\"\"No user_details configured in config.yml. 
Will not create a\n user_details table.\"\"\")\n except OperationalError as e:\n raise e", "def init_db():\n with LoggerApi.app_context():\n db = get_db()\n with LoggerApi.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def migrate(cls):\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS meetups(\n id serial PRIMARY KEY,\n topic varchar,\n happening_date varchar,\n tags varchar,\n location varchar,\n images varchar,\n body varchar\n )\"\"\")\n database.connection.commit()", "def create_schema(self, schema: str):\n return", "def create_tables() -> None:\n print(\"Creating database tables using SQLAlchemy ORM\")\n Base.metadata.create_all(engine)\n print(\"Done creating tables\")", "def initialize_db(self) -> None:\n if not self.check_schema_initialized():\n self._create_genes_table()\n self._create_meta_data_table()", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "async def _create_tables_declarative(self, base, engine):\n if hasattr(base, 'metadata'):\n base.metadata.create_all(bind=engine, checkfirst=True)\n return", "def create_database_tables():\n with APP.app_context():\n DB.create_all()", "def init_db():\n with app.app_context():\n db = connect_db()\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def initdb():\n db = getdb()\n\n with open(os.path.join(config.BASE_DIRECTORY, 'schema.sql')) as f:\n db.executescript(f.read())", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def create_table(self, schema: str, table: str, col_types: dict, non_null_columns: List[str]):\n return", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def setup_db(self) -> None:\n conn = mysql.connector.connect(\n user=self.app.config[\"DATABASE_USER\"], password=self.app.config[\"DATABASE_PASSWORD\"],\n host=self.app.config[\"DATABASE_HOST\"], port=self.app.config[\"DATABASE_PORT\"], raise_on_warnings=True\n )\n try:\n cursor = conn.cursor()\n cursor.execute(\n \"CREATE DATABASE IF NOT EXISTS {} CHARACTER SET utf8\".format(self.app.config[\"DATABASE_NAME\"])\n )\n conn.commit()\n except:\n raise\n else:\n with self.DBManager(self.app) as connection:\n for model in sorted(lib.get_subclasses(lib.models.Model), key=lambda x: x.index):\n model.setup_table(connection=connection)\n finally:\n conn.close()", "def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()", "def create_database():\n with connection:\n connection.execute(CREATE_MOVIE_TABLE)\n connection.execute(CREATE_USER_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)", "def setUp(self):\n self.conn = seed.connect_to_db(\"testing\")\n self.cur = self.conn.cursor()\n\n seed.cur = self.conn.cursor()\n seed.conn = self.conn\n\n self.tables = [\n {\n \"name\": \"people\", \n \"schema\": [(\"firstname\", \"10\", \"VARCHAR\"), (\"lastname\", \"10\", \"VARCHAR\"), (\"age\", \"3\", \"INTEGER\"), (\"active\", \"1\", \"BOOLEAN\")]\n },\n {\n 
\"name\": \"animals\",\n \"schema\": [(\"animal_id\", \"7\", \"INTEGER\"), (\"name\", \"10\", \"VARCHAR\"), (\"species\", \"20\", \"VARCHAR\")]\n },\n {\n \"name\":\"testformat1\",\n \"schema\": [(\"name\", \"10\", \"VARCHAR\"), (\"valid\", \"1\", \"BOOLEAN\"), (\"count\", \"3\", \"INTEGER\")]\n }\n ]\n for table in self.tables:\n seed.create_table(table[\"name\"], table[\"schema\"])", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise", "def create_db_execute(self):\n self.execute(query=self.db_create_schema.format(self.db_name))", "def create_table(self):\n from deployflag.models.metadata import (\n GridSearchParameter,\n ModelFramework,\n ModelPerformanceMetadata,\n )\n\n with self.connection:\n self.connection.create_tables(\n [ModelPerformanceMetadata, GridSearchParameter, ModelFramework],\n safe=True,\n )", "def setup_schema(command, conf, vars):", "def check_db_schema(self):\n if not self.db.get_tables():\n self.create_db_schema()", "def initial_db_setup() -> None:\n db_filename = \"twdft.db\"\n db_path = os.path.join(TWDFT_DATA_DIR, db_filename)\n csv_filename = \"sites.csv\"\n csv_path = os.path.join(TWDFT_DATA_DIR, csv_filename)\n db_is_new = not os.path.exists(db_path)\n sites_csv = os.path.join(TWDFT_DATA_DIR, csv_filename)\n\n if db_is_new:\n with sqlite3.connect(db_path) as conn:\n c = conn.cursor()\n\n # first we create a site object\n c.execute(\n \"\"\"\n CREATE TABLE site(\n id INTEGER PRIMARY KEY,\n name TEXT,\n site_type TEXT,\n sub_category TEXT,\n address_1 TEXT,\n address_2 TEXT,\n town TEXT,\n county TEXT,\n country TEXT,\n postcode TEXT,\n site_category TEXT,\n freq_target TEXT,\n created TEXT,\n notes TEXT,\n last_inspection TEXT,\n next_inspection TEXT,\n pfsp_approval TEXT,\n pfsp_expiry TEXT,\n unlocode TEXT,\n pfso TEXT,\n pso TEXT,\n pfsa_approval TEXT,\n pfsa_expiry TEXT,\n team TEXT,\n created_by TEXT,\n last_updated TEXT,\n updated_by TEXT,\n afp_loc TEXT,\n rdf TEXT,\n classification TEXT,\n article24 TEXT,\n psa_approval TEXT,\n inspection_due TEXT\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspection table\n\n c.execute(\n \"\"\"\n CREATE TABLE inspection(\n id INTEGER PRIMARY KEY,\n site INTEGER,\n date TEXT,\n status TEXT,\n time TEXT,\n FOREIGN KEY(site) REFERENCES site(id)\n )\n \"\"\"\n )\n conn.commit()\n\n # next we want an inspector table\n c.execute(\n \"\"\"\n create table inspector(\n id integer primary key,\n first_name text,\n last_name text\n )\n \"\"\"\n )\n conn.commit()\n\n for i in INSPECTORS:\n first = i.split(\" \")[0]\n last = i.split(\" \")[1]\n c.execute(\n \"INSERT INTO inspector(first_name, last_name) VALUES (?,?)\",\n (first, last),\n )\n\n # a table that links inspectors with inspections\n c.execute(\n \"\"\"\n CREATE TABLE inspector_inspections(\n inspector INTEGER,\n inspection INTEGER,\n FOREIGN KEY (inspector) REFERENCES inspector(id),\n FOREIGN KEY (inspection) REFERENCES inspection(id)\n )\n \"\"\"\n )\n conn.commit()\n\n for site in map(Site._make, csv.reader(open(csv_path, \"r\"))):\n try:\n c.execute(\n f\"\"\"\n INSERT INTO site 
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",\n (\n int(site.id.replace(\",\", \"\")),\n site.name,\n site.site_type,\n site.sub_category,\n site.address_1,\n site.address_2,\n site.town,\n site.county,\n site.country,\n site.postcode,\n site.site_category,\n site.freq_target,\n site.created,\n site.notes,\n site.last_inspection,\n site.next_inspection,\n site.pfsp_approval,\n site.pfsp_expiry,\n site.unlocode,\n site.pfso,\n site.pso,\n site.pfsa_approval,\n site.pfsa_expiry,\n site.team,\n site.created_by,\n site.last_updated,\n site.updated_by,\n site.afp_loc,\n site.rdf,\n site.classification,\n site.article24,\n site.psa_approval,\n site.inspection_due,\n ),\n )\n except sqlite3.IntegrityError as e:\n print(\"That hasnae worked\", site.inspection_due)", "def createschema(self):\n def closure(cur):\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS config (\n key varchar(1024) PRIMARY KEY,\n value text\n );\n CREATE TABLE IF NOT EXISTS rooms (\n id serial PRIMARY KEY,\n name text NOT NULL\n );\n CREATE TABLE IF NOT EXISTS slides (\n id serial PRIMARY KEY,\n -- The ordering index of the slide, set to NULL if slide should be hidden\n sequence_no integer NULL UNIQUE,\n -- The room that should be displayed on this slide, set to NULL for master slides aren't associated with a room\n room integer REFERENCES rooms NULL,\n -- The masters are numbered sequentially and defined in content.py\n master integer NOT NULL,\n -- Overrides the title (normally the room name will be used)\n title text NULL,\n -- If max_rows is NULL, use the config default\n max_rows integer NULL\n );\n CREATE TABLE IF NOT EXISTS events (\n id serial PRIMARY KEY,\n room integer REFERENCES rooms NOT NULL,\n begins timestamp NOT NULL,\n ends timestamp NOT NULL,\n name text NOT NULL\n );\n \"\"\")\n \n self.execute(closure)", "def init_db():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def initialize_empty_database(self):\r\n Base.metadata.create_all(self.engine)", "def init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql') as fobj:\n db.cursor().executescript(fobj.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()", "def 
init_db(base):\n base.metadata.create_all(engine, checkfirst=True)", "def create_train_table(conn):\n execute_sql_script(conn, \"03_create_train_table.sql\")", "def initialize_db(self, table_name: str):\n create_table_sql = f\"\"\"\n create table if not exists {table_name} (\n id integer primary key autoincrement not null,\n sample_date text not null unique,\n location text not null,\n min_temp real not null,\n max_temp real not null,\n avg_temp real not null);\n \"\"\"\n with DBOperations(self.name) as dbcm:\n dbcm.execute(create_table_sql)", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "def init_db():\n db = get_db()\n with current_app.open_resource('schema.sql') as f:\n db.executescript(f.read().decode('utf8'))", "def create_tables():\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n create_train_table(conn)\n create_questions_table(conn)\n create_lectures_table(conn)\n create_example_test_table(conn)\n create_example_test_table(conn)\n\n conn.close()", "def _create_database(self):\n self._connect()\n cursor = self._connection.cursor()\n cursor.execute(make_table_creation_command(\"reviews\", FIELD_DESCRIPTIONS))\n self._connection.commit()", "def init_db() -> None: \n \n Base.metadata.create_all(bind=engine)", "def _initialize_db():\n conn, c = _get_db_connection()\n\n with open(str(SCHEMA_PATH)) as f:\n c.executescript(f.read())\n\n conn.close()", "def init_db():\n db = get_db()\n\n with current_app.open_resource(\"schema.sql\") as f:\n db.executescript(f.read().decode(\"utf8\"))", "def create_db_tables():\n\n try:\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()\n except Exception as e:\n # TODO: melhorar o informe do erro\n raise e", "def create_tables():\n inf(\"Creating tables\")\n \n pinners = Table('pinners', metadata,\n Column('pinner_id', Integer, primary_key=True),\n Column('name', String(40)),\n Column('email', String(40))\n )\n pinners.create()\n \n contents = Table('contents', metadata,\n Column('content_id', Integer, primary_key=True),\n Column('url', String(80)),\n Column('display_status', String(20)), # good, objectionable, copyright\n Column('pinner_id', Integer, ForeignKey('pinners.pinner_id'))\n )\n contents.create()\n\n reviewers = Table('reviewers', metadata,\n Column('reviewer_id', Integer, primary_key=True),\n Column('name', String(40)),\n Column('email', String(40))\n )\n reviewers.create()\n\n complaints = Table('complaints', metadata,\n Column('complaint_id', Integer, primary_key=True),\n Column('complaint_timestamp', DateTime), # when the complaint was filed\n Column('complaint_type', String(80)), # objectionable, copyright\n Column('process_status', String(20)), # complaint, review, done\n Column('display_status', String(20)), # good, objectionable, copyright\n Column('review_timestamp', DateTime), # when the compliant was resolved\n Column('pinner_id', Integer, ForeignKey('pinners.pinner_id')),\n Column('reviewer_id', Integer, 
ForeignKey('reviewers.reviewer_id')),\n Column('content_id', Integer, ForeignKey('contents.content_id'))\n )\n complaints.create()\n \n # could create a table of \"near by\" images and/or near by features and \n # include these in the review", "def create_tables(self):\n if not self.is_enabled(Subsystem.database):\n raise RuntimeError(\"Database subsystem was not enabled\")\n\n Base.metadata.create_all(self.engine)", "def _create_tables_classic(self, engine, metadata):\n if engine and metadata:\n with (yield from engine) as conn:\n for x in self._models.values():\n try:\n yield from conn.execute(CreateTable(x))\n except ProgrammingError as error:\n if hasattr(self.app, 'log') and self.app.log:\n if self.app.debug:\n self.app.log.info(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n else:\n if self.app.debug:\n print(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n return", "def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = \"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n ############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n \n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 
'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 
'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': {'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} 
(\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger ####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)", "def create_schema(self, schema):\n sql = f'set role {self.write_role}; ' \\\n + f'CREATE SCHEMA IF NOT EXISTS {schema};'\n return sql", "def init_db():\n\twith closing(connect_db()) as db:\n\t\twith app.open_resource('schema.sql', mode='r') as f:\n\t\t\tdb.cursor().executescript(f.read())\n\t\tdb.commit()", "def imp_create_tables():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # Drop the tables 
(uncomment if necessary)\n #drop_tables(cur, conn)\n\n # Create the tables\n create_tables(cur, conn)\n\n conn.close()", "def create_schema(schema): \n\n query = \"CREATE SCHEMA IF NOT EXISTS {}\".format(schema)\n qdb.execute(query)", "def create_all_tables():\n\tcommon_db.create_all_tables()", "def create_tables(self):\n\n self.cur.execute('''CREATE TABLE IF NOT EXISTS my_business_entry\n (\n id SERIAL PRIMARY KEY,\n url_yes_no boolean,\n url TEXT,\n phone_yes_no boolean,\n phone TEXT,\n rating TEXT,\n nr_of_ratings TEXT,\n myBusiness boolean,\n company TEXT\n );''')\n\n self.connection.commit()", "def create_tables(self):\n\n cur = self.conn.cursor()\n cur.execute('CREATE TABLE blog(blog_id INTEGER PRIMARY KEY, '\n ' title TEXT, subtitle TEXT, content TEXT, date TEXT, '\n ' author_id INTEGER, '\n 'FOREIGN KEY (author_id) REFERENCES author(author_id)) ')\n\n cur.execute('CREATE TABLE author(author_id INTEGER PRIMARY KEY, '\n ' name TEXT UNIQUE) ')\n\n cur.execute('CREATE TABLE password(password_id INTEGER PRIMARY KEY,'\n ' author_id INTEGER, '\n ' password TEXT, '\n 'FOREIGN KEY (author_id) REFERENCES author(author_id)) ')\n\n self.conn.commit()", "def create_table(my_database, new_table):\n\n dbconnect = connect_db(my_database)\n\n # create a cursor for the queries\n cursor = dbconnect.cursor()\n cursor.execute(\"USE airflowdb\")\n\n # here we delete the table, it can be kept or else\n cursor.execute(f\"DROP TABLE IF EXISTS {new_table}\")\n\n # these matches the Twitter data\n query = (\n f\"CREATE TABLE `{new_table}` (\"\n \" `id` INT(11) NOT NULL AUTO_INCREMENT,\"\n \" `user` varchar(100) NOT NULL ,\"\n \" `created_at` timestamp,\"\n \" `tweet` varchar(255) NOT NULL,\"\n \" `retweet_count` int(11) ,\"\n \" `id_str` varchar(100),\"\n \" PRIMARY KEY (`id`))\"\n )\n\n cursor.execute(query)\n dbconnect.close()\n cursor.close()\n\n return print(f\"Created {new_table} table\")", "def _create_table_if_not_exists(self) -> None:\n COLUMN_DEFINITIONS = 'definitions'\n COLUMN_TYPE = 'type'\n\n KEY_REF = '$ref'\n\n TYPE_LOOKUP = {\n 'string': 'VARCHAR(255)',\n 'integer': 'INTEGER',\n 'boolean': 'BOOLEAN',\n 'number': 'INTEGER',\n }\n\n def ref_lookup(\n property: Dict[str, Any], fields: Dict[str, Any]\n ) -> Dict[str, Any]:\n ref = property[KEY_REF]\n property_lookup_name = ref[ref.rfind('/') + 1 :]\n return fields[COLUMN_DEFINITIONS][property_lookup_name]\n\n field_queries = []\n fields = json.loads(self.schema.schema_json())\n\n del fields[Keywords.Properties.value][\n Keywords.ID.value\n ] # Remove primary key field. 
It is handled with auto increment below.\n\n for property_name, property in fields[Keywords.Properties.value].items():\n if KEY_REF in property:\n property = ref_lookup(property, fields)\n field_queries.append(\n f'{property_name} {TYPE_LOOKUP[property[COLUMN_TYPE]]}'\n )\n table_columns = ', '.join(field_queries)\n\n with connect(**BaseModel.db_settings) as connection:\n cursor = connection.cursor()\n cursor.execute(\n f'CREATE TABLE IF NOT EXISTS {self.table_name} (ID INTEGER PRIMARY KEY AUTO_INCREMENT, {table_columns})'\n )\n self._table_created[self.table_name] = True", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def createTable(self):\n ## reading the source file\n\n \n ## building the hive script\n\n ## creating the metastore table by executing the Hive script on the remote machine (SSH)", "def init_db():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def init_sensor_db(self, drop_tables=True):\n logger.debug(\"Creating Database Engine.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n\n if drop_tables:\n logger.debug(\"Drop system table if within the existing database.\")\n Base.metadata.drop_all(db_engine)\n\n logger.debug(\"Creating Sentinel1ASF Database.\")\n Base.metadata.bind = db_engine\n Base.metadata.create_all()", "def create_schema(self):\n schema = '''CREATE TABLE jping (\n ip_address text not null,\n interface text not null,\n hostname text not null,\n ping_results integer not null,\n UNIQUE(ip_address, hostname)\n )\n '''\n self.query(schema)", "def create_tables(self, tables=None):\n LOG.debug(f\"Creating table subset {tables}\")\n Base.metadata.create_all(self.engine, tables, checkfirst=False)", "def initdb():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def createTables(self,table=\"all\"):\n auto=\"\"\n\tif self.dbType==\"mysql\":\n\t auto=\"AUTO_INCREMENT\"\n\t \n\ttableName=\"FileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t fileid %s %s PRIMARY KEY, \n\t fileName TEXT,\n\t typeid %s\n\t )\n\t \"\"\"%(tableName,self.long,auto,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"KeyFile\"\n\tif table==\"all\" or table==tableName: \n\t # Drop/create KeyFile table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL, \n\t view VARCHAR(255) NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t keyFileId %s NOT NULL, PRIMARY KEY(graphid,view,run,uid) )\n\t \"\"\"%(tableName,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"RunUID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s 
(\n\t run %s NOT NULL,\n\t uid %s )\n\t \"\"\"%(tableName,self.UINT,self.uid)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"MaxMasterID\"\n if table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t masterMaxId %s NOT NULL,\n\t comment TEXT )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Location\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Localtion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t graphid %s NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t locationFileId %s NOT NULL )\n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t query = \"CREATE INDEX LocationGroups ON Location(graphid,run,uid)\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Version\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Version table in SQLDB.EventStoreDB\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t grade VARCHAR(255) NOT NULL, \n\t timeStamp %s NOT NULL, \n\t minRunNumber %s NOT NULL, \n\t maxRunNumber %s NOT NULL, \n\t graphid %s NOT NULL,\n\t state VARCHAR(10) ) \n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersion\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t svName VARCHAR(255) NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersionComment\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersionComment table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s NOT NULL PRIMARY KEY,\n\t svid %s NOT NULL,\n\t CommentDate %s,\n\t Comment TEXT )\n\t \"\"\"%(tableName,self.UINT,auto,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"GraphPath\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"PathDepend\"\n if table==\"all\" or table==tableName:\n\t # 
Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t parentId %s, \n\t childId %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"FileType\"\n if table==\"all\" or table==tableName: \n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s %s PRIMARY KEY, \n\t type VARCHAR(8) NOT NULL,\n\t description TEXT )\n\t \"\"\"%(tableName,self.UINT,auto)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"OrphanFileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s PRIMARY KEY, \n\t dateTime DATETIME,\n\t user VARCHAR(8) NOT NULL )\n\t \"\"\"%(tableName,self.long)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query", "def setup_db(filepath, tables=(), reset=False):\n \n if os.path.exists(filepath) and not reset:\n return\n \n if os.path.exists(filepath) and reset:\n os.remove(filepath)\n \n # create table with appropriate columns\n with get_conn(filepath) as conn:\n for tab in tables:\n make_table(conn, tab.name,\n tab.text_fields, tab.real_fields)" ]
[ "0.7223083", "0.7214102", "0.6795453", "0.6649024", "0.65013885", "0.64839", "0.64839", "0.6444698", "0.6426618", "0.64096576", "0.6398911", "0.63895595", "0.6386641", "0.63824946", "0.63433653", "0.63402724", "0.63384145", "0.63329905", "0.6317877", "0.63123184", "0.6277626", "0.62550944", "0.62542766", "0.6235056", "0.62239826", "0.6214488", "0.61996657", "0.6193519", "0.6186089", "0.6183617", "0.61830634", "0.61792237", "0.6174591", "0.6163997", "0.6161633", "0.6157953", "0.6157953", "0.61422336", "0.6120602", "0.61119777", "0.61110204", "0.61041355", "0.6099554", "0.60951245", "0.60927194", "0.6091216", "0.6089686", "0.6081981", "0.60759526", "0.6074232", "0.60718346", "0.6071092", "0.60683346", "0.60659444", "0.60569954", "0.6053937", "0.6053407", "0.6049379", "0.60397303", "0.6030456", "0.6030456", "0.6030456", "0.6030456", "0.6030456", "0.6030456", "0.6030456", "0.6004474", "0.5996508", "0.5994124", "0.59932166", "0.59827393", "0.597735", "0.59770125", "0.59707946", "0.59707856", "0.5970429", "0.5969973", "0.596662", "0.5956844", "0.59518945", "0.59516066", "0.5948086", "0.59440374", "0.5943594", "0.5931639", "0.5931117", "0.592586", "0.5921865", "0.59157634", "0.5913353", "0.591282", "0.5901207", "0.5900577", "0.5900196", "0.5898342", "0.58969843", "0.5896314", "0.58943623", "0.5876528", "0.5871146", "0.58660233" ]
0.0
-1
Inserts fetched tweet data into the target database table
def insert_tweets(conn: Connection, fetch_data: Iterable[Dict]) -> None:
    s = Session(bind=conn)
    meta = MetaData()
    meta.reflect(bind=conn)
    s.add_all([Tweet(**t) for t in fetch_data])
    s.commit()
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def fillTweetInDB(self):\n sqlInsertTweets = \"INSERT INTO tweet content VALUES %s\"\n mycursor.executemany(sqlInsertTweets,self.content)\n mydb.commit()", "def insert_into_tweets(self, infos):\n query = \"insert into tweets(tweet_id, insert_date, created_at, hashtag) values(?, ?, ?, ?);\"\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.executemany(query, infos)", "def populate_table(\n user, created_at, tweet, retweet_count, id_str, my_database=DATABASE):\n\n dbconnect = connect_db(DATABASE)\n\n cursor = dbconnect.cursor()\n cursor.execute(\"USE airflowdb\")\n\n # add content here\n\n try:\n query=\"INSERT INTO tweets (user, created_at, tweet, retweet_count, id_str) VALUES (%s, %s, %s, %s, %s)\"\n \n cursor.execute(query, (user, created_at, tweet, retweet_count, id_str))\n \n dbconnect.commit()\n print(\"commited\")\n\n except mysql.Error as e:\n print(e)\n dbconnect.rollback()\n\n cursor.close()\n dbconnect.close()\n\n return", "def insert_tweets(post):\n db_file = dbFile\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n for i in range(0,len(post['id_str'])):\n tweet={}\n tweet['user_id']=post['user_id']\n tweet['created_at'] = post['created_at'][i]\n tweet['id_str'] = post['id_str'][i]\n tweet['text'] = post['text'][i]\n tweet['source'] = post['source'][i]\n tweet['truncated'] = post['truncated'][i]\n tweet['in_reply_to_status_id_str'] = post['in_reply_to_status_id_str'][i]\n tweet['in_reply_to_screen_name'] = post['in_reply_to_screen_name'][i]\n tweet['coordinatesNumber'] = post['coordinatesNumber'][i]\n tweet['coordinates'] = post['coordinates'][i]\n tweet['coordinatesType'] = post['coordinatesType'][i]\n tweet['placeCountry'] = post['placeCountry'][i]\n tweet['placeCountryCode'] = post['placeCountryCode'][i]\n tweet['placeFullName'] = post['placeFullName'][i]\n tweet['placeID'] = post['placeID'][i]\n tweet['placeName'] = post['placeName'][i]\n tweet['placeType'] = post['placeType'][i]\n tweet['placeURL'] = post['placeURL'][i]\n tweet['quoted_status_id_str'] = post['quoted_status_id_str'][i]\n tweet['is_quote_status'] = post['is_quote_status'][i]\n tweet['retweeted_status'] = post['retweeted_status'][i]\n tweet['quote_count'] = post['quote_count'][i]\n tweet['reply_count'] = post['reply_count'][i]\n tweet['retweet_count'] = post['retweet_count'][i]\n tweet['favorite_count'] = post['favorite_count'][i]\n tweet['hashtagsNumber'] = post['hashtagsNumber'][i]\n tweet['hashtags'] = post['hashtags'][i]\n tweet['urls'] = post['urls'][i]\n tweet['urlsNumber'] = post['urlsNumber'][i]\n tweet['user_mentionsNumber'] = post['user_mentionsNumber'][i]\n tweet['user_mentions'] = post['user_mentions'][i]\n tweet['mediaNumber'] = post['mediaNumber'][i]\n tweet['mediaURLs'] = post['mediaURLs'][i]\n tweet['mediaType'] = post['mediaType'][i]\n tweet['symbolsNumber'] = post['symbolsNumber'][i]\n tweet['symbols'] = post['symbols'][i]\n tweet['pollsNumber'] = post['pollsNumber'][i]\n tweet['polls'] = post['polls'][i]\n tweet['possibly_sensitive'] = post['possibly_sensitive'][i]\n tweet['filter_level'] = post['filter_level'][i]\n tweet['lang'] = post['lang'][i]\n tweet['matching_rulesNumber'] = post['matching_rulesNumber'][i]\n tweet['matching_rulesTag'] = post['matching_rulesTag'][i]\n tweet['matching_rulesID'] = post['matching_rulesID'][i]\n tweet['collected_at'] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n sqlite_insert(conn, 'GTapp_tweets', tweet)", "def insert_tweet(value):\n execute(query=_query['ins_tweet'],\n value=value,\n single=False)\n\n id_value = 
[[element[0]]for element in value]\n\n execute(query=_query['ins_sentiment'],\n value=id_value, # Tweet ID value\n single=False\n )", "def insert_tweet(status):\n status['replies'] = []\n return db.tweets.insert(status)", "def persist_db(database, tweets):\n log.debug(\"{} tweets to db\".format(len(tweets)))\n\n for tweet in tweets:\n tweet['_id'] = tweet['id_str']\n database.update(tweets)", "def load_twitter_data_to_db(self, truncate_table=False, skip_loaded_files=False):\n\n\t\ttable_fields_names, table_fields_types = self.identify_table_mask('twitter_stream_table-mask.txt')\n\n\t\t# Truncating table\n\t\tif truncate_table:\n\t\t\tquery = 'TRUNCATE TABLE ' + TABLE_NAME;\n\t\t\ttry:\n\t\t\t\tself.execute_query(query)\n\t\t\texcept Exception, e:\n\t\t\t\tprint '[e] Exeption: %s' % (str(e))\n\n\t\ttotal_queries = 0\n\t\terror_queries = 0\n\t\tsuccess_queries = 0\n\n\t\tfetcher = TwitterFetcher()\n \t\tfetched_tweets = fetcher.fetchsamples(10)\n\n \t\t\n \t\tfor tweet in fetched_tweets:\n\n \t\t\ttweet_as_list = list()\n \t\t\ttweet_as_list.append('(\"uni.vlba.gdelt.data::seq_twitter_stream_id\".nextval)')\n \t\t\ttweet_as_list.append(tweet)\n \t\t\t#print tweet_as_list\n\n \t\t\tif self.insert_data(tweet_as_list, table_fields_names, table_fields_types):\n\t\t\t\tsuccess_queries = success_queries + 1\n\t\t\telse:\n\t\t\t\terror_queries = error_queries + 1\n\n\t\ttotal_queries = success_queries + error_queries\t\t\n\t\t\n\t\tprint '\\n[i] Queries processed in total: %d\\n' % (total_queries)\n\n\t\tif error_queries > 0:\n\t\t\tprint '[i] Queries processed in total with errors: %d' % (error_queries)", "def store_tweet(tweet, topic):\n try:\n tweet = tweet.replace(\"'\", \"\\\\'\" )\n query = f\"insert into {db_schema}.{db_table_tweet} set tweet='{tweet}', topic='{topic}'\"\n logger.info(f'QUERY: {query}') \n with MysqlCursor() as cur:\n cur.execute(query)\n tweet_id = int(cur.lastrowid)\n logger.info(f'ID_TWEET: {tweet_id}') \n return tweet_id\n except Exception as ex:\n logger.exception(ex)", "def add(self, url):\n record_sql = '''\n INSERT INTO {} (url)\n VALUES (?)\n '''.format(\n self.tablename\n )\n try:\n with self.conn:\n self.conn.execute(record_sql, (url,))\n except sqlite3.IntegrityError:\n logger.exception('Already tweeted %s!', url)", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def exportToDB(self, tweets):\n for t in range(len(tweets)):\n for x in range(len(tweets[t])):\n doc_ref = 
self.fs_db.collection(u'twitter').document(str(tweets[t][1]))\n doc_ref.set({\n u'created_date': str(tweets[t][0]),\n u'id': str(tweets[t][1]),\n u'tweet': tweets[t][2],\n u'screen_name': tweets[t][3],\n u'name': tweets[t][4],\n u'likes': tweets[t][5],\n u'retweets': tweets[t][6],\n u'location': tweets[t][7]\n })", "def connect(created_at, username, tweet, location, followers_count, tweet_id):\n try:\n con = mysql.connector.connect(host = 'localhost',\n database='Twitter', user='root', password = db_password,\n auth_plugin='mysql_native_password', charset = 'utf8')\n\n if con.is_connected():\n\n #Insert twitter data\n\n cursor = con.cursor()\n \n query = \"INSERT INTO no_retweet (created_at, username, tweet, location, \\\n followers_count, tweet_id) \\\n VALUES (%s, %s, %s, %s, %s, %s)\"\n cursor.execute(query, (created_at, username, tweet, location, followers_count, tweet_id))\n\n con.commit()\n cursor.close()\n con.close()\n\n except Error as e:\n print(e)\n\n\n return", "def insert_data(data, collec, many):\n db = client.get_database('tweetstorm')\n collection = db.get_collection(collec)\n if many:\n collection.insert_many(data)\n logger.info(f\"{ymdhms()} inserted {len(data)} tweets to {collec} collection\")\n else:\n collection.insert_one(data)\n logger.info(f\"{ymdhms()} inserted data {data} to {collec} collection\")", "def store_result(id_tweet, response, polarity, subjectivity):\n try:\n # query = f\"insert into {db_schema}.{db_table_pred} set id_tweet={id_tweet} and ml_version='0.1' and response='{prediction}'\"\n query = f\"insert into {db_schema}.{db_table_pred} values({id_tweet}, '{ML_VERSION}', '{response}', {polarity}, {subjectivity})\"\n logger.info(f'QUERY: {query}')\n with MysqlCursor() as cur:\n cur.execute(query)\n except Exception as ex:\n logger.exception(ex)", "def insert_db():\n populate_tables()", "def do_insert_data(self, *args):\n print(\"Provide data to insert\")\n self.connection_obj.insert_into_table(**self.__class__.populate_data())\n print(\"Data Insertion Successful\")", "def on_data(self, data):\n try:\n # parse as json\n raw_data = json.loads(data)\n\n # extract the relevant data\n if \"text\" in raw_data:\n user = raw_data[\"user\"][\"screen_name\"]\n created_at = parser.parse(raw_data[\"created_at\"])\n tweet = raw_data[\"text\"]\n retweet_count = raw_data[\"retweet_count\"]\n id_str = raw_data[\"id_str\"]\n\n # insert data just collected into MySQL my_database\n populate_table(user, created_at, tweet, retweet_count, id_str)\n print(f\"Tweet colleted at: {created_at}\")\n\n except Error as e:\n print(e)", "def ingest():\n db.delete_dataset_records(DATASET_ID)\n\n db.insert_dataset({\n 'dataset_id': DATASET_ID,\n 'title': 'North American Breeding Bird Survey (BBS)',\n 'version': '2016.0',\n 'url': 'https://www.pwrc.usgs.gov/bbs/'})\n\n to_taxon_id = insert_taxa()\n to_place_id = insert_places()\n to_event_id = insert_events(to_place_id)\n insert_counts(to_event_id, to_taxon_id)", "def populate_table(self, data):\n\n db = self.connection(database=\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"\"\"\n INSERT INTO film (title, film_id, year, director, cast, rating, poster_url) \n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n cur.execute(sql, data)\n db.commit()\n except:\n print(\"An error occurred when saving the data!\")\n\n db.close()", "def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()", "def add_tweet():\r\n tweet = models.Tweet(text_content=request.json['content'], 
username=request.json['username'],\r\n timestamp=datetime.datetime.now())\r\n db.session.add(tweet)\r\n db.session.commit()\r\n\r\n return {'id': tweet.id}", "def insert_data(self, table_name, data):\n for data_point in data:\n query = \"INSERT INTO %s(%s) VALUES (%s)\"\n\n fields = \", \".join(data_point.keys())\n values = \", \".join([self.pack_data(value) for value in data_point.values()])\n self.cursor.execute(query % (table_name, fields, values))\n self.db_connection.commit()", "def save_tweet(self, twitter) -> None:\n if isinstance(twitter, dict):\n json_data = twitter\n else:\n json_data = json.loads(twitter)\n\n try:\n breakpoint()\n self.db.tweets.find_one_and_update(\n {'id_str': json_data['id_str']},\n {'$inc': {'seq': 1}},\n projection={'seq': True, '_id': False},\n upsert=True,\n )\n except Exception as e:\n log.error(e)", "def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def on_data(self, data):\n\n t = json.loads(data) \n tweet = {\n 'text': t['text'],\n 'username': t['user']['screen_name'],\n 'followers_count': t['user']['followers_count']\n }\n\n logging.critical(f'\\n\\n\\nTWEET INCOMING: {tweet[\"text\"]}\\n\\n\\n')\n tweet_collection.insert({'username' : tweet['username'],'followers_count' : tweet['followers_count'], 'text' : tweet['text']})", "def insert_data(stored_data, table_name):\r\n for item in stored_data:\r\n cursor.execute('''INSERT INTO {} VALUES(?, ?, ?, ?)'''.format(table_name), item)", "def gatherData():\n\n # connect to database, set up the tweepy API object, and find the next date to search\n\n cnx = sqlite3.connect(DB_FILE)\n api = generateAPI(wait_on_rate_limit=True, wait_on_rate_limit_notify=True, **CREDENTIALS)\n\n nextdate = findNextDate(cnx, FIRSTDATE)\n year = nextdate[:4]\n\n # attempt to scrape box office data\n\n bodata = getTopMovies(BO_ENDPOINT, nextdate, CNT_MOVIES)\n\n if not bodata.empty:\n bodata.to_sql('boxoffice', ENGINE, if_exists='append', index=False)\n print(\"Box Office Data for [{0}] Written to Database\".format(nextdate))\n else:\n raise BOError(\"Error Scraping/Writing Box Office Data for [{0}]\".format(nextdate))\n\n # attempt to collect tweet data\n\n for movie in bodata.title:\n try:\n tweets = searchMovie(api, movie, nextdate, MAX_TWEETS)\n if not tweets.empty:\n tweets.to_sql('tweets', ENGINE, if_exists='append', index=False)\n print(\"Tweets for [{0}] Written to Database\".format(movie))\n else:\n raise TweetError(\"Error Fetching/Writing Tweets for [{0}]\".format(movie))\n except tweepy.error.TweepError:\n raise TweetError(\"Error Fetching/Writing Tweets for [{0}]\".format(movie))\n\n # attempt to collect movie metadata\n\n 
for movie in bodata.title:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), year)\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n minfo = getMovieInfo(OMDB_ENDPOINT, processTitle(movie), str(int(year)-1))\n if minfo:\n insertMovie(cnx, movie, nextdate, minfo)\n else:\n print(\"Movie: [{0}] Could Not be Found via OMDB\".format(movie))\n\n # commit changes and close DB connection\n\n cnx.commit()\n cnx.close()\n\n print(\"\\nAll Data for {0} Successfully Added to the Database!\\n\".format(nextdate))\n return nextdate", "def datapull_master(self, connect):\n self.newtable = 'popcorn'\n self.oldtable = 'hashtags'\n self.scorecol = 'score'\n self.coltype = 'INTEGER'\n self.pullvars = 'tweet_id, created_at, from_user_screen_name, from_user_id, favorite_count, retweet_count, content'\n self.modvars ='tweet_id, created_at, from_user_screen_name, from_user_id, favorite_count, retweet_count, score, content'\n self.filtervars ='language, entities_media_count, retweeted_status, truncated'\n self.filters = '''language = 'en' AND entities_media_count = 0 AND retweeted_status = '' AND truncated = 0'''\n self.ordering = 'score DESC'\n self.tweet_dt = 'created_at'\n self.cron_ordering = 'created_at ASC'\n self.c = connect.cursor()\n #Create modified table (drop if exists)\n self.c.execute(\"DROP TABLE IF EXISTS {newtab}\".format(newtab=self.newtable))\n self.c.execute(\"CREATE TABLE {newtab} AS SELECT {vars}, {filtervars} FROM {oldtab} ORDER BY {order}\" \\\n .format(newtab=self.newtable, vars=self.pullvars, filtervars=self.filtervars, oldtab=self.oldtable, order=self.cron_ordering))\n #Add in Score\n self.c.execute(\"ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}\"\\\n .format(tn=self.newtable, cn=self.scorecol, ct=self.coltype))\n self.c.execute(\"UPDATE {tn} SET {scorecol} = {fvt} + 5*{rt}\"\\\n .format(tn=self.newtable, scorecol=self.scorecol, fvt='favorite_count', rt='retweet_count'))\n connect.commit()", "def insert_data():\n\tBase.metadata.drop_all(engine)\n\tBase.metadata.create_all(engine)\n\tu1 = insert_user(\"233@B.com\", \"/static/image/avatar.JPG\")\n\tu2 = insert_user(\"fy@B.com\", \"/static/image/avatar.JPG\")\n\tc = insert_catalog(u1.id, \"Sichuan Dish\")\n\tinsert_catalog(u1.id, \"Fujian Dish\")\n\tinsert_catalog(u1.id, \"Guangdong Dish\")\n\tinsert_catalog(u2.id, \"Zhejiang Dish\")\n\tinsert_catalog(u2.id, \"Beijing Dish\")\n\tinsert_item(u1.id, \"Iphone 6 plus\", c, 'Is a phone', None)\n\tinsert_item(u1.id, \"Hot pot\", c, \"Hot hot hot\", None)\n\tinsert_item(u2.id, \"Kong Bao Chicken\", c, \"Classic\", None)", "def insert_into_db(self, database):\n\n # insert person\n keys = \"\"\n values = \"\"\n for key, value in self.person.items():\n # location\n if key == \"location\":\n # ensure location is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * FROM p21_cdm.location WHERE city='{value['city']}' \n AND zip='{value['zip']}') THEN INSERT INTO p21_cdm.location (city, zip) \n VALUES ('{value['city']}', '{value['zip']}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.person (location_id, {keys[:-1]}) \n VALUES((SELECT location_id \n FROM p21_cdm.location\n WHERE city='{self.person['location']['city']}' \n and zip='{self.person['location']['zip']}'), \n {values[:-1]})\"\"\")\n\n # insert visits\n for visit in self.visits:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n for key, value in visit.items():\n if key == \"care_site_name\":\n # 
ensure care site is in table\n database.select(f\"\"\"DO $do$ BEGIN IF NOT EXISTS (SELECT * \n FROM p21_cdm.care_site \n WHERE care_site_name='{value}') \n THEN INSERT INTO p21_cdm.care_site (care_site_name) \n VALUES ('{value}'); END IF; END; $do$\"\"\")\n continue\n\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n database.select(f\"\"\"INSERT INTO p21_cdm.visit_occurrence (care_site_id, {keys[:-1]}) \n VALUES((SELECT care_site_id\n FROM p21_cdm.care_site\n WHERE care_site_name='{visit['care_site_name']}'),\n {values[:-1]}) \n RETURNING visit_occurrence_id\"\"\")\n\n # insert measurements, observations, conditions & procedures\n for data, tablename in [(self.measurements, \"measurement\"),\n (self.observations, \"observation\"),\n (self.conditions, \"condition_occurrence\"),\n (self.procedures, \"procedure_occurrence\")]:\n for entry in data:\n keys = \"person_id,\"\n values = f\"'{self.person['person_id']}',\"\n\n for key, value in entry.items():\n keys += f\"{key},\"\n values += f\"'{value}',\"\n\n entry[\"sql_id\"] = database.select(f\"\"\"INSERT INTO p21_cdm.{tablename}({keys[:-1]})\n VALUES({values[:-1]}) RETURNING {tablename}_id\"\"\")[0][0]\n\n # insert fact_relationships in both directions\n for table1, entry1, table2, entry2 in self.fact_relations:\n # 44818890 = Finding associated with (SNOMED)\n database.select(f\"\"\"INSERT INTO p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n VALUES('{table1}','{entry1['sql_id']}','{table2}','{entry2['sql_id']}','44818890')\"\"\")\n # 44818792 = Associated with finding (SNOMED)\n database.select(f\"\"\"INSERT INTO p21_cdm.fact_relationship(domain_concept_id_1, fact_id_1, \n domain_concept_id_2, fact_id_2, \n relationship_concept_id)\n VALUES('{table2}','{entry2['sql_id']}','{table1}','{entry1['sql_id']}','44818792')\"\"\")\n\n # make transactions persistent\n database.commit()", "def insert_data(self) -> None:\n if self.min_insert_size > self.insert_count:\n LOG.debug(\"Not enough data for insert....\")\n return\n LOG.debug(f'Inserting {self.insert_count} records...')\n self.insert.write(self.copy_trailer)\n self.insert.seek(0)\n conn = pg.connect(self.dsn)\n with conn.cursor() as cur:\n cur.copy_expert(self.cmd, self.insert)\n conn.commit()\n conn.close()\n self.insert.close()\n self.create_byte_buffer()", "def insert_data(instance_id, database_id):\n spanner_client = spanner.Client()\n instance = spanner_client.instance(instance_id)\n database = instance.database(database_id)\n\n with database.batch() as batch:\n batch.insert(\n table='achieve',\n columns=('user_id', 'is_buy_completed', 'is_fta_completed','is_liked_completed', 'is_list_completed', 'is_profile_completed','is_registeration_completed', 'is_sold_completed', 'is_ss_completed',),\n values=[\n (1, 1, 1, 1, 1, 1, 1, 1, 1),\n (2, 1, 1, 1, 1, 0, 0, 0, 0),\n (3, 1, 0, 1, 0, 1, 0, 1, 0),\n (4, 0, 1, 0, 1, 0, 1, 0, 1),\n (5, 0, 0, 0, 0, 0, 0, 0, 0)])\n\n print('Inserted data.')", "def on_success(self, data):\n if 'text' not in data:\n logging.warning(\"Recieved tweet without text\")\n return\n\n # Save the name of the collection task alongside the tweet data\n data['collection'] = self.name\n\n # Calculate a timestamp object from the data\n ts_float = float(data['timestamp_ms'])\n data['timestamp_obj'] = datetime.utcfromtimestamp(ts_float/1000)\n\n # Insert the tweet into the database\n insertid = None\n if self.db is not None:\n insertid = self.db.insert_one(data).inserted_id\n\n # Call the callback 
functions if exists\n if self.callbacks is not None:\n for f in self.callbacks:\n f(self.name, data, insertid)", "def insertData(self, table, title, rating, authorinfo, pubinfo):\n\n\t\tsql = \"insert into %s (bookname, authorinfo, pubinfo, rating) \\\n\t\t\tvalues('%s', '%s', '%s', '%s')\" %(table, title, authorinfo,\n\t\t\tpubinfo, rating)\n\t\ttry:\n\t\t\tself.cursor.execute(sql)\n\t\t\tself.conn.commit()\n\t\texcept Exception, e:\n\t\t\tsys.exit()", "def _bulk_add_rows(self, converted) :\n\n insert_sql = 'INSERT INTO \"%s\" VALUES (%s)' % (self.name, ','.join(['?'] * len(self.cols)))\n cur = self.con.cursor()\n cur.executemany(insert_sql, converted)", "def insert_records(self, insert_query, insert_query_columns, wiki_data, table_name):\n print(\"Inserting {} rows into {}\".format(len(wiki_data), table_name))\n for index, item in enumerate(wiki_data):\n values_to_insert = [item[column]['value'] for column in insert_query_columns]\n try:\n self.cur.execute(insert_query, values_to_insert)\n except ValueError as ve:\n print(\"Could not execute query : {} with values\".format(insert_query, values_to_insert))\n raise ve\n\n if index % 1000 == 0:\n print(\"Inserted {} rows\".format(index))\n print(\"Inserted {} rows\".format(len(wiki_data)))\n print(\"Finished inserting {}\".format(table_name))", "def fill_target_table(new_data, curs, conn, overwrite=False):\n for i in new_data:\n connect_database.add_target_to_database(list(i), curs, conn, overwrite_exsiting = overwrite)\n conn.commit()", "def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()", "def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def insert_into_tweet_peaks(self, peak_list_info):\n query = \"\"\"insert into\n tweet_peaks(id, peak_datetime, time_frame, hashtag, mean, std, sensibility\n , freq_limit, qt_tweets, probability)\n values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?);\"\"\"\n with sql.connect('./{}.db'.format(self.name)) as conn:\n try:\n conn.executemany(query, peak_list_info)\n except Exception:\n logger.exception(\"fail to insert peak data: {}\".format(peak_list_info))", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: 
\n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def processIdiom(i, idiom):\n global db\n cursor = db.cursor()\n \n statuses = searchIdiom(i, idiom)\n #Should have at least 10 statuses to be useful\n if len(statuses) < 10:\n return\n # loop through each of my statuses, and print its content\n for status in statuses:\n #print status[\"text\"]\n try:\n id_str = status[\"id_str\"]\n text = status[\"text\"].encode('ascii','ignore')\n retweet_count = status[\"retweet_count\"]\n user = status[\"user\"]\n created_at = status[\"created_at\"]\n entities = status[\"entities\"]\n entities = json.dumps(entities)\n\n user_id_str = user[\"id_str\"]\n name = user[\"name\"].encode('ascii','ignore')\n screen_name = user[\"screen_name\"]\n description = user[\"description\"].encode('ascii','ignore')\n user_entities = json.dumps(user[\"entities\"])\n followers_count = user[\"followers_count\"]\n listed_count = user[\"listed_count\"]\n profile_image_url = user[\"profile_image_url\"]\n verified = str(user[\"verified\"])\n\n \n cursor.execute('INSERT IGNORE INTO idiomatic_tweets(idiom, id_str, text, retweet_count, user_id_str, created_at, entities, name, profile_image_url, screen_name, verified) \\\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);' \\\n ,(idiom, id_str, text, retweet_count, user_id_str, created_at, entities, name, profile_image_url, screen_name, verified))\n\n cursor.execute('INSERT IGNORE INTO idiomatic_users(id_str, name, screen_name, description, entities, followers_count, listed_count, profile_image_url, verified) \\\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);' \\\n ,(user_id_str, name, screen_name, description, user_entities, followers_count, listed_count, profile_image_url, verified))\n except Exception as e:\n print('Error : ', e)\n print sys.exc_traceback.tb_lineno \n\n if statuses:\n cursor.execute('INSERT IGNORE INTO idiomatic_idioms(idiom) VALUES (%s);', (idiom,))\n else:\n print \"statuses\" , statuses", "def insertdata():\n import models \n from models import Ngrams\n from models import Phrases\n allphrases = {}\n phrase_index= {}\n # Reading 100000 questions for this project. 
Original data was 7GB \n # and very large to process.\n r = engine.execute('select * from questions where id < 100000')\n data = r.fetchall()\n for row in data:\n answer = row[4]\n # Tokenizing answer\n ans = answer.split()\n for i in range(len(ans)):\n # Running inner loop to generate trigrams\n for j in range(i+1, len(ans)+1):\n phrase = \" \".join(ans[i:j])\n # Getting only 3 grams instead of all ngrams\n if len(phrase.split()) < 4:\n print row[0]\n lemmaphrase = lemmatize(ans[i:j])\n ng = Ngrams(row[0],phrase, lemmaphrase)\n db_session.add(ng)\n phrase = phrase.lower()\n if phrase not in allphrases:\n allphrases[phrase] = [phrase.lower()]\n phrase_index[phrase] = newPhraseInfo(phrase)\n phrase_index[phrase][\"count\"] += 1\n phrase_index[phrase][\"ids\"].add(str(row[0]))\n db_session.commit()", "def insert_data(self):\n\n pass", "def insert(conn, table_info, table_data):\n\n sql = ''' INSERT INTO ''' + table_info \n + ''' VALUES(''' + \"?,\" * (len(table_data)-1) + \"?)\"\n cursor = conn.cursor()\n cursor.execute(sql, table_data)\n conn.commit()", "def populate_twitter_account_to_db():\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n with open(NEWSFEED['TWITTER']['ACCOUNT_LIST'], 'r') as f:\n lines = f.readlines()\n for l in lines:\n screen_name = l.strip()\n\n if CredibleUSTwitterAccount.objects.filter(screen_name=screen_name).exists():\n continue\n\n try:\n twitteruser = api.GetUser(screen_name=screen_name)\n CredibleUSTwitterAccount.objects.create(screen_name=twitteruser.screen_name,\n uid=twitteruser.id,\n description=twitteruser.description)\n except TwitterError as e:\n print(e.message)", "def bulkInsert(self, url, values):\n pass", "def insert_into_request_table(self, request_list):\n query = \"\"\"insert into\n requests(created_at, track_list, languages, locations, minimum_tweet_per_sec,\n time_frame, peak_detection_sensibility, analysis_sample_size, db_tweets_name)\n values(?, ?, ?, ?, ?, ?, ?, ?, ?);\"\"\"\n with sql.connect('./{}.db'.format(self.name)) as conn:\n try:\n conn.execute(query, request_list)\n except Exception:\n logger.exception(\"fail to insert peak data: {}\".format(request_list))", "def insert_into_table(self, conn, insert_into_table_sql):\n try:\n c = conn.cursor()\n c.execute(insert_into_table_sql)\n conn.commit()\n\n except Error as e:\n print(e)", "def insert_to_db(self, query):\n try:\n q = self.connection.execute(query)\n except Exception:\n self.print_std_error()", "def insert_movie_data(self, movie_people_dict):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.executemany(\n 'INSERT INTO movie_table VALUES(?,?);',\n movie_people_dict.items()\n )\n self._close_connection(conn)", "def __save_tweet(self, twitter_result):\n timestamp = twitter_result['timestamp']\n\n # Remove +0000 from timestamp\n timestamp_split = timestamp.split(' ')\n timestamp = ''\n for piece in timestamp_split:\n if piece[0] is not '+':\n timestamp += piece + ' '\n\n # Remove trailing space\n timestamp = timestamp[:-1]\n\n # Cast to iso format\n timestamp = datetime.strptime(timestamp, \"%a %b %d %H:%M:%S %Y\").isoformat()\n\n crawl = self.mongo_controller.add_crawl_twitter(\n twitter_result['keyword_id'],\n twitter_result['tweet_id'],\n twitter_result['text'],\n twitter_result['likes'],\n twitter_result['retweets'],\n timestamp,\n return_object=True,\n cast=True,\n )\n\n app.send_task('process-crawl', kwargs={ 'crawl_dict': crawl.to_json() }, queue=queues['processor'])\n\n return crawl", "def insert(self,table,values):\n 
self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()", "def _insert_bulk(self, iterable):\n self.cursor.executemany(self.INSERT, iterable)\n self.conn.commit()", "def fillFollowerInDB(self):\n sqlInsertFollowers = \"INSERT INTO follower screen_name VALUES %s\"\n mycursor.execute(sqlInsertFollowers,self.screen_name)\n mydb.commit()", "def insert_into_citation_table(citations):\n cursor = connection.cursor()\n cursor.execute(\n 'DROP TABLE IF EXISTS Citations;'\n 'CREATE TABLE Citations(sourcePaperId INT, targetPaperId INT, citationId INT NOT NULL AUTO_INCREMENT PRIMARY KEY);'\n )\n cursor.close()\n i = 0\n\n for citation in citations:\n print i\n i+=1\n cursor = connection.cursor()\n cursor.execute(\n 'INSERT INTO Citations (sourcePaperId, targetPaperId) VALUES '\n '((SELECT PaperId FROM Papers WHERE DOI = %s),(SELECT PaperId FROM Papers WHERE DOI = %s));'\n ,[citation['source'].encode('UTF-8'),citation['target'].encode('UTF-8')])\n cursor.close()", "def persist_data(tweet_data, cassandra_session):\n try:\n logger.debug('Start to persist data to cassandra %s \\n', tweet_data)\n parsed = json.loads(tweet_data)\n unit_id = str(parsed.get('_unit_id'))\n gender = parsed.get('gender')\n tweet_text = str(parsed.get('text'))\n hashtags = str(parsed.get('hashtags'))\n tweet_count = parsed.get('tweet_count')\n tweet_location = parsed.get('tweet_location')\n normalized_location = parsed.get('normalized_location')\n user_timezone = parsed.get('user_timezone')\n\n # statement = \"INSERT INTO %s (unit_id, gender, tweet_text, tweet_location, normalized_location) VALUES ('%s', '%s', '%s', '%s', '$s')\" % (data_table, unit_id, gender, tweet_text, tweet_location, normalized_location)\n statement = cassandra_session.prepare(\"INSERT INTO %s (unit_id, gender, tweet_text, hashtags, tweet_count, tweet_location, normalized_location) VALUES (?, ?, ?, ?, ?, ?, ?)\" % data_table)\n cassandra_session.execute(statement, (unit_id, gender, tweet_text, hashtags, tweet_count, tweet_location, normalized_location))\n logger.info('Persisted data to cassandra for unit_id: %s, gender: %s, tweet_text: %s, hashtags: %s, tweet_count: %s, tweet_location: %s, normalized_location: %s\\n' % (unit_id, gender, tweet_text, hashtags, tweet_count, tweet_location, normalized_location))\n except Exception as e:\n logger.error('Failed to persist data to cassandra %s %s \\n', tweet_data, e)", "def __insert_time_data(cur, df):\n # convert timestamp column to datetime\n t = pd.to_datetime(df.ts, unit='ms')\n \n # create a dataframe with corresponding database values\n time_data = (\n t,\n t.dt.hour.values,\n t.dt.day.values,\n t.dt.weekofyear.values,\n t.dt.month.values,\n t.dt.year.values,\n t.dt.weekday.values\n )\n column_labels = (\"timestamp\", \"hour\", \"day\", \"week\", \"month\", \"year\", \"weekday\")\n time_df = pd.DataFrame.from_dict(\n dict(\n zip(column_labels, time_data)\n )\n )\n\n # insert time data records\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))", "def insert_execute(self, insert_data):\n self.execute(query=self.db_insert_schema.format(self.table_name), data=insert_data)", "def insert(self, teacher: Teacher):\n sql = f''' INSERT INTO {self.table_name}({','.join([f[0] for f in Teacher.FIELDS])})\n VALUES({('?,' * len(Teacher.FIELDS))[:-1]}) '''\n print(sql)\n teacher_dict = teacher.json_dump()\n print(teacher_dict)\n # assert 1==2\n self.cursor.execute(sql, teacher_dict)\n self.conn.commit()", "def add_tweet():\n if not request.json or 'author_id' not in 
request.json or 'text' not in request.json:\n abort(400)\n\n db = get_db()\n\n author_id = request.json.get('author_id')\n text = request.json.get('text')\n pub_date = int(time.time())\n\n db.execute('''insert into message (author_id, text, pub_date) values (?, ?, ?)''', (author_id, text, pub_date))\n db.commit()\n flash('Message recorded succesfully')\n message = {\"author_id\": author_id, \"text\": text, \"pub_date\": pub_date}\n return jsonify({'message': message}), 201", "def insert_user(usrObj):\n db_file = dbFile\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n collected=strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n col=[\"id_str\", \"follow_request_sent\", \"has_extended_profile\", \"profile_use_background_image\", \"contributors_enabled\", \"live_following\", \"translator_type\", \"verified\", \"blocked_by\", \"profile_text_color\", \"muting\", \"profile_image_url_https\", \"profile_sidebar_fill_color\", \"followers_count\", \"profile_sidebar_border_color\", \"default_profile_image\", \"ChangeFollower\", \"listed_count\", \"is_translation_enabled\", \"utc_offset\", \"statuses_count\", \"description\", \"friends_count\", \"location\", \"profile_link_color\", \"profile_image_url\", \"notifications\", \"geo_enabled\", \"profile_background_color\", \"blocking\", \"profile_background_image_url\", \"screen_name\", \"lang\", \"following\", \"profile_background_tile\", \"favourites_count\", \"name\", \"url\", \"CollectedTimeStamp\", \"created_at\", \"profile_background_image_url_https\", \"time_zone\", \"protected\", \"default_profile\", \"is_translator\"]\n\n userdb={}\n for key, value in usrObj.iteritems():\n if key in col:\n userdb[key]=usrObj[key]\n userdb[\"CollectedTimeStamp\"]=collected\n sqlite_insert(conn,'GTapp_twitteruser',userdb)", "def insert_data(data):\n\n try:\n sql = \"INSERT INTO movies VALUES(%s, %s, %s)\"\n conn = psycopg2.connect(dsn=DB_DSN)\n cur = conn.cursor()\n cur.executemany(sql, data)\n conn.commit()\n except psycopg2.Error as e:\n print e.message\n else:\n cur.close()\n conn.close()", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def save(self, data):\n query = \"INSERT INTO {} (title, body, meetup_id, user_id) \\\n VALUES('{}','{}','{}', '{}') RETURNING *\".format(self.table, data['title'], data['body'], data['meetup_id'], data['user_id'])\n self.cur.execute(query)\n result = self.cur.fetchone()\n self.conn.commit()\n return result", "def add_to_db(self, data_base):\n comment_data = self.get_comment(dictionary=True)\n comment_data.pop('id')\n cursor = data_base.cursor()\n cursor.execute(f\"INSERT INTO comment ({', '.join(comment_data.keys())}) VALUES {tuple(comment_data.values())}\")\n if self.commit_to_db:\n data_base.commit()\n cursor.close()", "def insert(self, media):\n insert_query = \"\"\"INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\" % MediaCollection.COLLECTIONS_TABLE\n self.cursor.execute(insert_query, media.totuple())\n self.connection.commit()", "def load_data(connection, insert_sql, data):\n cur = connection.cursor()\n for d in data:\n cur.execute(insert_sql, d)\n connection.commit()", "def insert_values():\n pass", "def insert_to_db(self, query):\n self.cursor.execute(query)\n self.conn.commit()\n return self", "def ingest(cls):\n for html in cls.query.all():\n session.add(html.parse())\n\n session.commit()", "def 
process_tweets(collection):\n\n\twith open('positive-tweets.txt') as p:\n\t\tprint \"{0}: Inserting positive tweets into mongo...\".format(datetime.now())\n\t\tfor tweet in p.readlines():\n\t\t\tcollection.insert({'tweet': tweet, 'sentiment': 1})\n\tp.close()\n\n\twith open('negative-tweets.txt') as n:\n\t\tprint \"{0}: Inserting negative tweets into mongo...\".format(datetime.now())\n\t\tfor tweet in n.readlines():\n\t\t\tcollection.insert({'tweet': tweet, 'sentiment': 0})\n\tn.close()", "def on_data(self, data):\n\n t = json.loads(data)\n\n\n if 'extended_tweet' in t:\n text = t['extended_tweet']['full_text']\n else:\n text = t['text']\n\n\n is_tweet_reply = t['in_reply_to_status_id'] == None\n is_quote = t['is_quote_status'] == False\n\n if 'RT' not in t['text'] and is_tweet_reply and is_quote:\n\n tweet = {'text': text, 'username' : t['user']['screen_name'],\n 'number_of_followers' : t['user']['followers_count'],\n 'location' : t['user']['location'], 'number_of_friends' : t['user']['friends_count'], 'retweet_count' :\n t['retweet_count']}\n\n\n logging.critical('\\n\\n\\nNEW TWEET INCOMING: ' + tweet['text']) \n \n \n load_tweet_into_mongo(tweet)\n logging.critical('\\n\\n\\nSUCCESSFULLY DUMPED INTO MONGO!')", "def execute(self, context):\n\n # Initialize PostgreSQL hook\n self.postgres = PostgresHook(\n postgres_conn_id=self.postgres_conn_id,\n schema=self.postgres_schema).get_sqlalchemy_engine()\n\n # Initialize Socrata hook\n super().execute()\n\n # Load table\n table = self._select_table()\n self.table_dicts = [dict(row) for row in table]\n\n if self.replace:\n result = self.socrata.replace(self.dataset_id, self.table_dicts)\n else:\n # Code from etl-airflow\n for i in range(0, len(self.table_dicts), UPLOAD_CHUNK_SIZE):\n try:\n result = self.socrata.upsert(self.dataset_id, self.table_dicts[i:i+UPLOAD_CHUNK_SIZE])\n except:\n print(f\"Error on record {i}\")\n result = self.socrata.upsert(self.dataset_id, self.table_dicts[i:i+UPLOAD_CHUNK_SIZE])", "def write_to_database(info,timer):\n\n inserts = create_sql_write(info,timer)\n\n connection = engine.connect()\n for insert in inserts:\n connection.execute(insert)\n connection.close()", "def _extract_and_insert(cursor, table, data, ignore_if_exists=True, **kwargs):\n if ignore_if_exists:\n return _insert_if_new(cursor, table, _subdict(_columns(cursor, table), data), **kwargs)\n else:\n return _insert_dict(cursor, table, _subdict(_columns(cursor, table), data), **kwargs)", "def insert(self, data):\r\n pass", "def write_to_db(self, df):\n #query for the history data\n query = \"INSERT IGNORE INTO spotify_history (Time, Song_Name, Spotify_ID, Spotify_URI, Popularity, Object_Type) VALUES (%s, %s, %s, %s, %s, %s)\"\n \n val = []\n for index, row in df.iterrows():\n #some songs don't have milisecond, so the dateformat needs to be adapted\n try:\n timestamp = datetime.strptime(row[\"timestamp\"], '%Y-%m-%dT%H:%M:%S.%fZ')\n except:\n datetime.strptime(row[\"timestamp\"], '%Y-%m-%dT%H:%M:%SZ')\n finally:\n val.append((timestamp, \n row[\"name\"], \n row[\"id\"], \n row[\"uri\"], \n row[\"popularity\"], \n row[\"object_type\"]))\n\n self.cursor.executemany(query, val)\n print(\"New Songs in the History {}\".format(self.cursor.rowcount))\n \n #query for the song properties\n query = \"INSERT IGNORE INTO song_data (Spotify_ID, Spotify_URI, Artist, Album, Duration, Acousticness, Danceability, Energy, Instrumentalness, key_spotify, Liveness, Loudness, Mode, Speechiness, Tempo, Time_Signature, Valence) VALUES 
(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n val = []\n for index, row in df.iterrows():\n val.append((row[\"id\"], \n row[\"uri\"], \n row[\"artist\"], \n row[\"album\"], \n row[\"duration_ms\"], \n row[\"acousticness\"],\n row[\"danceability\"],\n row[\"energy\"],\n row[\"instrumentalness\"],\n row[\"key\"],\n row[\"liveness\"],\n row[\"loudness\"],\n row[\"mode\"],\n row[\"speechiness\"],\n row[\"tempo\"],\n row[\"time_signature\"],\n row[\"valence\"])\n )\n \n print(\"New Songs in the database: {}\".format(self.cursor.rowcount))\n self.cursor.executemany(query, val)\n \n self.connection.commit()", "def save(self)->None:\n database.cursor.execute(\"INSERT INTO meetups(topic,happening_date,tags,location,images,body) VALUES(%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.topic,\n self.happening_on,\n self.tags,\n self.location,\n self.images,\n self.body\n ))\n super().save()", "def insert(self, url, values):\n pass", "def insert_data(db, metadata, data):\n with Tx(db) as c:\n lock_tables(c)\n metadata['set_id'] = _insert_metadata(c, metadata)\n\n data_iterator = iter(data)\n first_row = next(data_iterator)\n headers = list(first_row.keys())\n for table in _tables_from_headers(headers):\n _insert_data_rows(c, table, metadata, chain([first_row], data_iterator))", "def insert_data():\n table = create_new_table()\n filename = '/home/nineleaps/Downloads/covid_info_.csv'\n dataset_ref = client.dataset(table.dataset_id)\n table_ref = dataset_ref.table(table.table_id)\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.CSV\n job_config.skip_leading_rows = 1\n job_config.autodetect = True\n with open(filename, \"rb\") as source_file:\n job = client.load_table_from_file(source_file, table_ref, job_config=job_config)\n job.result()\n print(\"Loaded {} rows into {}:{}.\".format(job.output_rows, table.dataset_id, table.table_id))", "def callback(message):\n try:\n # some unicode cleanup\n line = message.data.replace('\\\\u0000'.encode(),''.encode())\n temp = json.loads(line)\n if not 'user' in temp.keys():\n # means this message doesn't contain a bonafide tweet\n message.ack()\n return\n userdict = temp['user']\n if userdict['url'] is None:\n # so no null URL values get loaded\n userdict['url'] = '-'\n if userdict['location'] is None:\n userdict['location']=''\n if temp['geo'] is not None:\n lat = temp['geo']['coordinates'][0]\n lng = temp['geo']['coordinates'][1]\n else:\n lat=0\n lng=0\n\n placedict = temp['place']\n # some tweets have place information, others don't, we need a different\n # set of SQL statements for each case.\n if placedict is not None:\n placeid = temp['place']['id']\n insertsqlplace = \"\"\"\n INSERT INTO users \n (id, name, screen_name, location,\n url, description, verified, followers_count, friends_count,\n statuses_count, geo_enabled, lang, created_at,\n profile_background_image_url) \n VALUES\n (%(id)s, %(name)s, %(screen_name)s, %(location)s, %(url)s,\n %(description)s, %(verified)s, %(followers_count)s, %(friends_count)s,\n %(statuses_count)s, %(geo_enabled)s, %(lang)s, %(created_at)s,\n %(profile_background_image_url)s) \n ON CONFLICT DO NOTHING;\n \n INSERT INTO place\n (id, url, place_type, name, full_name, bounding_box_json)\n VALUES\n (%(idp)s, %(urlp)s, %(place_typep)s, %(namep)s,\n %(full_namep)s, %(bounding_box_jsonp)s) \n ON CONFLICT DO NOTHING;\n \n INSERT INTO tweets \n (tweet_id,\n tweet_text, tweet_source, in_reply_to_status_id, in_reply_to_user_id,\n tweet_date, place_id, user_id, geo_lat, geo_lng) \n VALUES \n 
(%(tweet_idt)s,\n %(tweet_textt)s, %(tweet_sourcet)s, %(in_reply_to_status_idt)s,\n %(in_reply_to_user_idt)s, %(tweet_datet)s, %(place_idt)s, %(user_idt)s,\n %(geo_latt)s, %(geo_lngt)s) \n ON CONFLICT DO NOTHING;\n \"\"\"\n parametersplace = {'id': userdict['id'],\n 'name': userdict['name'],\n 'screen_name': userdict['screen_name'],\n 'location': userdict['location'],\n 'url': userdict['url'],\n 'description': userdict['description'],\n 'verified': userdict['verified'],\n 'followers_count': userdict['followers_count'],\n 'friends_count': userdict['friends_count'],\n 'statuses_count': userdict['statuses_count'],\n 'geo_enabled': userdict['geo_enabled'],\n 'lang': userdict['lang'],\n 'created_at': parser.parse(userdict['created_at']),\n 'profile_background_image_url': userdict['profile_image_url'],\n 'idp': placedict['id'],\n 'urlp': placedict['url'],\n 'place_typep': placedict['place_type'],\n 'namep': placedict['name'],\n 'full_namep': placedict['full_name'],\n 'bounding_box_jsonp': json.dumps(placedict['bounding_box']),\n 'tweet_idt': temp['id'],\n 'tweet_textt': temp['text'],\n 'tweet_sourcet': temp['source'],\n 'in_reply_to_status_idt': temp['in_reply_to_status_id'],\n 'in_reply_to_user_idt': temp['in_reply_to_user_id'],\n 'tweet_datet': parser.parse(temp['created_at']),\n 'place_idt': placeid,\n 'user_idt': temp['user']['id'],\n 'geo_latt': lat, 'geo_lngt': lng}\n else:\n placeid = None\n insertsqlplace = \"\"\"\n INSERT INTO users \n (id, name, screen_name, location,\n url, description, verified, followers_count, friends_count,\n statuses_count, geo_enabled, lang, created_at,\n profile_background_image_url) \n VALUES\n (%(id)s, %(name)s, %(screen_name)s, %(location)s, %(url)s,\n %(description)s, %(verified)s, %(followers_count)s, %(friends_count)s,\n %(statuses_count)s, %(geo_enabled)s, %(lang)s, %(created_at)s,\n %(profile_background_image_url)s) \n ON CONFLICT DO NOTHING;\n\n INSERT INTO tweets \n (tweet_id,\n tweet_text, tweet_source, in_reply_to_status_id, in_reply_to_user_id,\n tweet_date, place_id, user_id, geo_lat, geo_lng) \n VALUES \n (%(tweet_idt)s,\n %(tweet_textt)s, %(tweet_sourcet)s, %(in_reply_to_status_idt)s,\n %(in_reply_to_user_idt)s, %(tweet_datet)s, %(place_idt)s, %(user_idt)s,\n %(geo_latt)s, %(geo_lngt)s) \n ON CONFLICT DO NOTHING;\n \"\"\"\n parametersplace = {'id': userdict['id'],\n 'name': userdict['name'],\n 'screen_name': userdict['screen_name'],\n 'location': userdict['location'],\n 'url': userdict['url'],\n 'description': userdict['description'],\n 'verified': userdict['verified'],\n 'followers_count': userdict['followers_count'],\n 'friends_count': userdict['friends_count'],\n 'statuses_count': userdict['statuses_count'],\n 'geo_enabled': userdict['geo_enabled'],\n 'lang': userdict['lang'],\n 'created_at': parser.parse(userdict['created_at']),\n 'profile_background_image_url': userdict['profile_image_url'],\n 'tweet_idt': temp['id'],\n 'tweet_textt': temp['text'],\n 'tweet_sourcet': temp['source'],\n 'in_reply_to_status_idt': temp['in_reply_to_status_id'],\n 'in_reply_to_user_idt': temp['in_reply_to_user_id'],\n 'tweet_datet': parser.parse(temp['created_at']),\n 'place_idt': placeid,\n 'user_idt': temp['user']['id'],\n 'geo_latt': lat, 'geo_lngt': lng}\n curr.execute(insertsqlplace,parametersplace)\n # commit the insert statements to the data base and then acknowledge\n # parsing of the message if successful.\n conn.commit()\n message.ack()\n if round(randint(0, 50000)/100)==250:\n print('Added ~'+str(250)+' tweets! 
'+temp['created_at'])\n except:\n print('Error loading tweet.')", "def store_data(self, data):\n self.data = data\n # HERE\n the_main_dict = {**self.user_data(), **self.entities_data(), **self.extract_relevant(), **self.locate(),\n **self.calculate_days(), **self.clean_user_desc()}\n # The below is the reason that the table creation must be written in alphabetical order. This is simpler than\n # writing the complex joins that would otherwise be needed.\n my_keys_list = sorted(the_main_dict.keys())\n my_items = list(map(lambda x: str(the_main_dict[x]).replace(\"'\", ''), my_keys_list))\n try:\n # Unpacks the items into an insert statement for the SQLite table\n self.conn.execute(\"INSERT INTO {0} VALUES('{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}',\"\n \"'{10}','{11}','{12}','{13}','{14}','{15}','{16}','{17}','{18}','{19}','{20}',\"\n \"'{21}','{22}','{23}','{24}','{25}','{26}','{27}','{28}')\".format(self.table, *my_items))\n self.limiting += 1\n return 0\n except sqlite3.IntegrityError:\n return 1", "def get_tweets_data(self):\n query = \"select * from tweets;\"\n with sql.connect('./{}.db'.format(self.name)) as conn:\n proc_data = conn.execute(query)\n data = proc_data.fetchall()\n\n cols = [\"id\", \"tweet_id\", \"insert_date\", \"created_at\", \"hashtag\"]\n tweets = pd.DataFrame.from_records(data=data, columns=cols)\n\n return tweets", "def importDatabase(self):\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Telefoon, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\", (naamInvoer.get(), achternaamInvoer.get(), telefoonnummerInvoer.get(), FietsNr, pincodeInvoer.get()))\n\n db_conn.commit()", "def insert_trades(self):\n\n if self.truncate_tables:\n if self.verbose:\n a,b,c = self.buy_table, self.sell_table, self.pending_table\n print(f\"Truncating tables {a}, {b}, and {c}\")\n\n sql = f'TRUNCATE TABLE {self.buy_table};'\n tb.Database().write(sql)\n sql = f'TRUNCATE TABLE {self.pending_table};'\n tb.Database().write(sql)\n sql = f'TRUNCATE TABLE {self.sell_table};'\n tb.Database().write(sql)\n\n tb.Database().insert(self.buy_table, list(self.buys.values()))\n tb.Database().insert(self.sell_table, list(self.sells.values()))\n tb.Database().insert(self.pending_table, list(self.pending.values()))\n\n if self.verbose:\n print(\"Insert successful\")", "def put_it_in_tables(self):\n my_connection = mysql.connector.connect(user=self.user, password=self.password, database='openfoodfacts')\n cursor = my_connection.cursor(buffered=True)\n for i in self.my_data:\n prod_name = i['product_name']\n try:\n add_aliment = (\"INSERT INTO aliment \"\n \"(product_name, product_description, barcode, nutritional_score, stores, product_category) \"\n \"VALUES (%s, %s, %s, %s, %s, %s)\")\n data_aliment = (i['product_name'].replace(\"'\", \"''\"), i['product_description'].replace(\"'\", \"''\"), i['barcode'].replace(\"'\", \"''\"), i['nutritional_score'].replace(\"'\", \"''\"), i['stores'].replace(\"'\", \"''\"), i['product_category'].replace(\"'\", \"''\"))\n cursor.execute(add_aliment, data_aliment)\n except mysql.connector.IntegrityError:\n pass \n my_connection.commit()\n cursor.close()\n my_connection.close()\n print(\"ok c'est fait\")", "def fill_the_db(testapp):\n session_factory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(session_factory, transaction.manager)\n for entry in ENTRIES:\n row = Entry(title=entry[\"title\"],\n creation_date=entry[\"creation_date\"],\n body=entry[\"body\"])\n dbsession.add(row)", "def 
_query_insert(self, sql, data=None):\n\n conn = psycopg2.connect(self.connect_args)\n cur = conn.cursor()\n cur.execute(sql, data)\n conn.commit()\n cur.close()\n conn.close()", "def createDB(self):\n\n\n mycursor.execute(\"DROP TABLE tweet\")\n mycursor.execute(\"DROP TABLE follower\")\n\n mycursor.commit()\n\n createFollowerTable = \"CREATE TABLE follower (\" \\\n \"screen_name VARCHAR(255),\" \\\n \"name varchar(255),\" \\\n \"PRIMARY KEY(screen_name)\" \\\n \")\"\n\n #createTweetTable = \"CREATE TABLE tweet (\" \\\n # \"idT VARCHAR(255),\" \\\n # \"idF VARCHAR(255),\" \\\n # \"type VARCHAR(255),\" \\\n # \"content VARCHAR(140),\" \\\n # \"weight INTEGER(10),\" \\\n # \"PRIMARY KEY(idT),\" \\\n # \"FOREIGN KEY (idF) REFERENCES follower(idF)\" \\\n # \")\"\n\n mycursor.execute(createFollowerTable)\n #mycursor.execute(createTweetTable)\n\n mydb.commit()", "def insert_values(listingid_to_text):\n sql = \"INSERT INTO listingid_to_text_english VALUES (%s, %s)\"\n args = [(key, val) for key, val in listingid_to_text.iteritems()]\n conn = None\n try:\n # read database configuration\n params = config()\n # connect to the PostgreSQL database\n conn = psycopg2.connect(**params)\n # create a new cursor\n cur = conn.cursor()\n \n print(\"here\")\n # execute the INSERT statement\n cur.executemany(sql, args)\n # commit the changes to the database\n conn.commit()\n # close communication with the database\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "def add_tweet(self, tweet):\n if tweet.guid not in self.guids:\n self.guids.append(tweet.guid)\n self.data.append(tweet)", "def on_data(self, data):\n status = json.loads(data)\n # increase the counter\n self.counter += 1\n\n retweet, rt_user, tweet_text, created_time = organize_tweet(status) \n\n if status['user']['id_str'] in infos.twitterids:\n\n who = status['user']['id_str']\n\n try:\n replied_to = status['in_reply_to_screen_name']\n except:\n replied_to = 'NULL'\n \n else:\n \n who = status['user']['screen_name']\n \n try:\n replied_to = infos.twitterids[status['in_reply_to_user_id_str']]\n except:\n replied_to = 'NULL'\n \n tweet = {\n \n 'id': status['user']['id_str'], #status.user.id_str,\n 'who': who,\n 'replied_to': replied_to,\n 'retweeted': retweet, #status['retweeted'], #status.retweeted,\n 'retweeted_from': rt_user,\n 'text': tweet_text,\n 'timestamp' : created_time\n }\n\n #write to mongoDB here\n collection.insert_one(tweet)\n print(f'New tweet arrived: {tweet[\"text\"]}')\n\n\n # check if we have enough tweets collected\n if self.max_tweets == self.counter:\n # reset the counter\n self.counter=0\n # return False to stop the listener\n return False", "def insert_data_bulk(self, table_name, data):\n if len(data) == 0:\n return\n\n fields = \", \".join(data[0].keys())\n value_placeholders = \", \".join([\"%s\" for f in data[0].keys()])\n query = \"INSERT INTO %s(%s) VALUES (%s)\" % (table_name, fields, value_placeholders)\n\n data = [tuple(self.pack(data_point.values())) for data_point in data]\n\n chunk_size = 50000\n data_chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]\n for chunk in data_chunks:\n self.cursor.executemany(query, chunk)\n self.db_connection.commit()", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def populate_hateword_data():\n with open(\"./data/hate-speech-lexicons/refined_ngram_dict.csv\") as f:\n lst = [row.split(',', 1)[0] for row in f]\n lst = lst[1:]\n\n lst = [{\n 
'word': word,\n 'category': [],\n 'similar_to': []\n } for word in lst]\n\n try:\n db = mongo_client.MongoClient(config.MONGO_URI).twitter\n db.hateword.delete_many({})\n result = db.hateword.insert_many(lst)\n print(\"Completed populating\", len(result.inserted_ids), \"hate words\")\n except pymongo.errors.BulkWriteError as e:\n print(e.details)" ]
[ "0.787744", "0.7727903", "0.7385115", "0.7071366", "0.6870072", "0.6737006", "0.6506767", "0.64741445", "0.64702773", "0.63960886", "0.63523656", "0.63268894", "0.6325997", "0.62656724", "0.6228846", "0.6178256", "0.6176296", "0.6122586", "0.6111885", "0.6098994", "0.6073444", "0.6063117", "0.60426015", "0.60162824", "0.59827644", "0.5971619", "0.5942944", "0.5914096", "0.5863141", "0.5860127", "0.5849958", "0.5837789", "0.5830791", "0.5830782", "0.5820096", "0.5816397", "0.5795231", "0.5788521", "0.5765687", "0.5760702", "0.57573754", "0.5756327", "0.57561713", "0.5743467", "0.57322586", "0.57132274", "0.5711824", "0.57029325", "0.5700163", "0.5687084", "0.5686309", "0.5683819", "0.5669783", "0.56667864", "0.5649374", "0.5641796", "0.56241345", "0.56117874", "0.5601458", "0.559303", "0.55885214", "0.5579013", "0.5578559", "0.557308", "0.55706", "0.55642176", "0.55616665", "0.5557731", "0.5556748", "0.5555149", "0.55467093", "0.55463016", "0.55449665", "0.5544511", "0.5537255", "0.55358267", "0.5530156", "0.5522605", "0.5521823", "0.55210334", "0.55198663", "0.55194545", "0.5507296", "0.5504358", "0.5500133", "0.5494644", "0.5494539", "0.54930156", "0.54796636", "0.5479164", "0.5478315", "0.547702", "0.5474446", "0.54734665", "0.54671544", "0.546133", "0.54566187", "0.5454756", "0.54533607", "0.545279" ]
0.76859015
2
returns fields key value dict
def prepare_from_tx(cls, txcomment, session=None): data_dict = deepcopy(txcomment.__dict__) data_dict['block_num'] = txcomment.block_num data_dict['transaction_num'] = txcomment.transaction_num data_dict['operation_num'] = txcomment.operation_num data_dict['timestamp'] = txcomment.timestamp data_dict['type'] = txcomment.type data_dict['txcomment'] = txcomment data_dict['session'] = session or object_session(txcomment) prepared = cls._prepare_for_storage(data_dict=data_dict) return prepared
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fields(node):\r\n return dict(iter_fields(node))", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results", "def fields(self) -> Dict[str, Field]:\n return self._fields", "def _fields_to_dict(fields_in):\n dict_out = {}\n\n for key, val in fields_in.items():\n param = {}\n param['default'] = val.missing\n param['type'] = type(val.missing)\n if key == 'files' or key == 'urls':\n param['type'] = str\n\n val_help = val.metadata['description']\n if 'enum' in val.metadata.keys():\n val_help = \"{}. Choices: {}\".format(val_help, \n val.metadata['enum'])\n param['help'] = val_help\n\n try:\n val_req = val.required\n except:\n val_req = False\n param['required'] = val_req\n\n dict_out[key] = param\n return dict_out", "def _get_field_details(self, data, fields):\n fields_metadata = dict()\n for field in fields:\n dtype = data[field].dtype\n field_template = self._FIELD_TEMPLATES.get(dtype.kind)\n if not field_template:\n raise ValueError('Unsupported dtype {} in column {}'.format(dtype, field))\n\n field_details = copy.deepcopy(field_template)\n fields_metadata[field] = field_details\n\n return fields_metadata", "def fields(fields, keep_field_in_value=False, key_as_tuple=False):\n if isinstance(fields, str):\n fields_set = {fields}\n fields = (fields,)\n else:\n fields_set = set(fields)\n\n def item2kv(item):\n if keep_field_in_value:\n key = dict()\n for k, v in item.items():\n if k in fields_set:\n key[k] = v\n val = item\n else:\n key = dict()\n val = dict()\n for k, v in item.items():\n if k in fields_set:\n key[k] = v\n elif not keep_field_in_value:\n val[k] = v\n\n if key_as_tuple:\n return tuple(key[f] for f in fields), val\n else:\n return key, val\n\n return item2kv", "def list_meta_fields():\n ret = {}\n status, result = _query(action=\"meta\", command=\"fields\")\n root = ET.fromstring(result)\n for field in root:\n field_id = None\n field_ret = {\"name\": field.text}\n for item in field.items():\n field_ret[item[0]] = item[1]\n if item[0] == \"id\":\n field_id = item[1]\n ret[field_id] = field_ret\n return ret", "def fields(self):\r\n return self._by_name.iteritems()", "def fields(self) -> Mapping[str, str]:\n return pulumi.get(self, \"fields\")", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n fields: Dict[str, Callable[[Any], None]] = {\n \"assignedDateTime\": lambda n : setattr(self, 'assigned_date_time', n.get_datetime_value()),\n \"capabilityStatus\": lambda n : setattr(self, 'capability_status', n.get_str_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"service\": lambda n : setattr(self, 'service', n.get_str_value()),\n \"servicePlanId\": lambda n : setattr(self, 'service_plan_id', n.get_uuid_value()),\n }\n return fields", "def fields(self):\n _fields = {\n i: attrgetter(i) for i in ('pf_type', 'label',)\n }\n _fields['host'] = self.get_safely_instance_partial(Host, 'host')\n return _fields", "def asPyDict(self):\n fieldDict = dict()\n for kvp in self.keyvaluepair_set.all():\n fieldDict[kvp.key] = kvp.value\n return fieldDict", "def custom_fields(self) -> dict:\n url = f'{self.api_url}Fields?apiKey={self.api_key}'\n r_dict = self._es_get_request(url)\n self._check_response(r_dict)\n\n return {l['Field']['Name']: l['Field']['Id'] for 
l in\n r_dict['ApiResponse']['Data']['Fields']} # list of dicts", "def registered_fields(self):\n return {key for mapping in self for key in mapping.mapping.keys()}", "def _get_data(self):\n data = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # skip for factories for now\n continue\n value = getattr(self, name)\n raw_value = field.to_raw(value)\n if isinstance(field, fields.Secret):\n data[f\"__{name}\"] = raw_value\n else:\n data[name] = raw_value\n\n return data", "def _query_fields(data):\n keys = data.keys() # Do this once to avoid any issue with dictionary order.\n column_fields = ', '.join(keys)\n value_fields = ', '.join('%({})s'.format(field) for field in keys)\n return column_fields, value_fields", "def fields_dict(self):\n return self._declared_fields", "def _fields_to_dict(fields_in):\n\n dict_out = {}\n\n for key, val in fields_in.items():\n param = {}\n param['default'] = val.missing\n param['type'] = type(val.missing)\n if key == 'files' or key == 'urls':\n param['type'] = str\n\n val_help = val.metadata['description']\n # argparse hates % sign:\n if '%' in val_help:\n # replace single occurancies of '%' with '%%'\n # since '%%' is accepted by argparse\n val_help = re.sub(r'(?<!%)%(?!%)', r'%%', val_help)\n\n if 'enum' in val.metadata.keys():\n val_help = \"{}. Choices: {}\".format(val_help,\n val.metadata['enum'])\n param['help'] = val_help\n\n try:\n val_req = val.required\n except Exception:\n val_req = False\n param['required'] = val_req\n\n dict_out[key] = param\n return dict_out", "def record_fields(self):\n\n record_fields_grp = self.settings_grp[RECORD_FIELDS]\n\n record_fields_dict = {}\n for group_name, dset in record_fields_grp.items():\n record_fields_dict[group_name] = list(dset.asstr())\n\n return record_fields_dict", "def _get_fields(self):\n return self._fields", "def get_fields(self):\n \n return self.metadata.keys()", "def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)", "def fields(self):", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def values(self, fields):\n\n values = {}\n\n for field in fields:\n if field.value is None and field.default is not None:\n values[field.name] = field.default\n else:\n values[field.name] = field.value\n\n return values", "def _make_field_map(fields):\n field_map = {}\n for field in fields:\n if field.name in field_map:\n raise SchemaParseException(\n 'Duplicate record field name %r.' 
% field.name)\n field_map[field.name] = field\n return field_map", "def get_fields(self):\n fields = {}\n allowed_types = (\n SerializerMethodField,\n Field,\n Serializer,\n )\n for attr in dir(self):\n if attr == 'data':\n continue\n\n if isinstance(getattr(self, attr), allowed_types):\n fields[attr] = getattr(self, attr)\n\n return fields", "def get_fields(self):\n return list(self.metadata.keys())", "def get_fields(self):\n\n return {\n attr: field['serializer']\n for attr, field in self._fields.items()\n }", "def get_field_names(self):\n return {rv[0] for rv in self.iter_fields()}", "def _get_fields_key(resource):\n if resource['code'] in [HTTP_OK, HTTP_ACCEPTED]:\n if (MODEL_RE.match(resource_id) or\n ANOMALY_RE.match(resource_id)):\n return resource['object']['model']['model_fields']\n elif CLUSTER_RE.match(resource_id):\n return resource['object']['clusters']['fields']\n elif CORRELATION_RE.match(resource_id):\n return resource['object']['correlations']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif STATISTICAL_TEST_RE.match(resource_id):\n return resource['object']['statistical_tests']['fields']\n elif LOGISTIC_REGRESSION_RE.match(resource_id):\n return resource['object']['logistic_regression']['fields']\n elif ASSOCIATION_RE.match(resource_id):\n return resource['object']['associations']['fields']\n elif SAMPLE_RE.match(resource_id):\n return dict([(field['id'], field) for field in\n resource['object']['sample']['fields']])\n else:\n return resource['object']['fields']\n return None", "def test_fields_to_dict(self):\r\n test_data = \\\r\n \"\"\"0\tR27DLI_4812\tR27DLI_600\tR27DLI_727\tU1PLI_403\tU1PLI_8969\tU1PLI_9080\tU1PLI_9526\tW3Cecum_6642\tW3Cecum_8992\r\n1\tU1PLI_7889\r\n2\tW3Cecum_4858\r\n3\tR27DLI_3243\tR27DLI_4562\tR27DLI_6828\tR27DLI_9097\tU1PLI_2780\tU1PLI_67\tU9PSI_10475\tU9PSI_4341\tW3Cecum_5191\"\"\".splitlines() # output from cd-hit\r\n obs = fields_to_dict(test_data)\r\n exp = {\r\n '0': ['R27DLI_4812', 'R27DLI_600', 'R27DLI_727', 'U1PLI_403',\r\n 'U1PLI_8969', 'U1PLI_9080', 'U1PLI_9526', 'W3Cecum_6642', 'W3Cecum_8992'],\r\n '1': ['U1PLI_7889'],\r\n '2': ['W3Cecum_4858'],\r\n '3': ['R27DLI_3243', 'R27DLI_4562', 'R27DLI_6828', 'R27DLI_9097', 'U1PLI_2780', 'U1PLI_67', 'U9PSI_10475', 'U9PSI_4341', 'W3Cecum_5191']}\r\n self.assertEqual(obs, exp)", "def make_dict(cls, fields, fields_kwargs):\n return utils.make_dict(fields, fields_kwargs)", "def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n 
continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields", "def get_fields(self, exclude=('id',)):\n fields = {}\n for field in self._meta.fields:\n if not field.name in exclude and getattr(self, field.name):\n fields[field.name] = getattr(self, field.name)\n return fields", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def get_fields(csv_file, fields):\n result = OrderedDict()\n for field in fields:\n result[field] = []\n \n with open(csv_file) as f:\n reader = csv.DictReader(f)\n for row in reader:\n for field in fields:\n result[field].append(row[field])\n \n return result", "def get_fields(self):\r\n return self.fields", "def get_fields(cls):\n return cls.fields.values()", "def _dataset_fields(geno):\n return {'title': geno['title'], 'notes': geno.get('notes', '')}", "def _fields(self):\n fields = [(\"serial\", self.serial), (\"active\", str(self.active)),\n (\"name\", self.name), (\"version\", self.version),\n (\"auto_update\", str(self.auto_update)),\n (\"new_version_available\", str(self.new_version_available)),\n (\"product_type\", self.product_type),\n (\"network_device\", str(self.network_device))]\n return fields", "def map_field_name_to_attribute() -> typing.Dict:\n return {\n \"tag\": \"tag\",\n \"contact\": \"contact\",\n }", "def fields(class_or_instance):\n\n # Might it be worth caching this, per class?\n try:\n fields_dict = getattr(class_or_instance, _FIELDS)\n except AttributeError:\n raise TypeError(\"must be called with a dataclass type or instance\")\n\n return fields_dict", "def fields() -> Dict[str, models.Field]:\n return dict(\n (field.name, field)\n for field in AccountTier._meta.get_fields()\n if field.name not in [\"id\"]\n )", "def get_kwargs(person_data, field):\n if field == '\\xef\\xbb\\xbfAcc_Index':\n return {'acc_index': person_data[field]}\n if field == 'Vehicle_Reference':\n return {'gb_data': person_data}\n if field == 'Casualty_Reference':\n return {'person_data': person_data}\n if field == 'Car_Passenger':\n return {'person_data': person_data, 'value': person_data[field]}\n return {'value': person_data[field]}", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for field in fields}\n return jsonify(result)", "def _datastore_fields(fs):\n return [{\n 'id': f['datastore_id'],\n 'type': _column_type(f['datastore_type'])}\n for f in fs]", "def field_wrapper(field):\n return {'field': field}", "def get_fields(data):\n return data['train'][data['train'].keys()[0]].attrs.keys()", "def get_fields(self):\n\n\t\treturn self.__fields", "def result_field_map():\n return {\n \"[run number]\": \"run_number\",\n \"map-file\": \"map_file\",\n \"People\": \"people\",\n \"person_path_weight\": \"person_path_weight\",\n \"Slow\": \"slow\",\n \"Medium\": \"medium\",\n \"Fast\": \"fast\",\n \"display-path-cost?\": \"display_path_cost_p\",\n \"add-person-spacing?\": \"add_person_spacing_p\",\n \"people-wait?\": \"people_wait_p\",\n \"equal-diagonal-weight?\": \"equal_diagonal_weight_p\",\n \"Slow-Speed\": \"slow_speed\",\n \"Medium-Speed\": \"medium_speed\",\n \"Fast-Speed\": \"fast_speed\",\n \"set-fire?\": \"set_fire_p\",\n \"Fire_Speed\": \"fire_speed\" ,\n \"mean-escape-time\": \"mean_escape_time\",\n }", "def get_properties(self, *fields: str) -> Dict[str, fields.Schema]:\n properties = {}\n for field in fields:\n properties.update(self.get_property(field))\n return properties", "def 
_to_known_field(cls, field_name: str, value) -> (Column, dict):\n field_names = field_name.split(\".\", maxsplit=1)\n if len(field_names) == 2:\n for field in cls.__fields__:\n if field.name == field_names[0] and field.field_type == dict:\n return field, {field_names[1]: value}\n return None, None", "def fields(self):\n ...", "def getFieldInfoDictionary(field, format=None):\n format = format or _getDocFormat(field)\n\n info = {'name': field.getName(),\n 'required': field.required,\n 'required_string': field.required and 'required' or 'optional',\n 'default': repr(field.default),\n 'title': field.title}\n\n # Determine the interface of the field\n iface = getFieldInterface(field)\n info['iface'] = {'name': iface.getName(), 'id': getPythonPath(iface)}\n\n # Determine the field class\n class_ = field.__class__\n info['class'] = {'name': class_.__name__,\n 'path': getPythonPath(class_).replace('.', '/')}\n\n # Render the field description\n info['description'] = renderText(field.description or '', format=format)\n\n return info", "def fields(self):\n for name, values in self.items():\n if name == \"set-cookie\":\n # Set-Cookie special case\n for value in values:\n yield (name, value)\n else:\n yield (name, \", \".join(values))", "def _parse_metadata_fields(key_value_block: str) -> Dict[str, str]:\n key_value_block = key_value_block.lstrip()\n field_lines = re.split(r'\\n', key_value_block)\n field_name = 'unknown'\n fields_builder: Dict[str, str] = {}\n for field_line in field_lines:\n field_match = RE_FIELD_COMPONENTS.match(field_line)\n if field_match and field_match.group('field') in NAMED_FIELDS:\n field_name = field_match.group(\n 'field').lower().replace('-', '_')\n field_name = re.sub(r'_no$', '_num', field_name)\n fields_builder[field_name] = field_match.group(\n 'value').rstrip()\n elif field_name != 'unknown':\n # we have a line with leading spaces\n fields_builder[field_name] += re.sub(r'^\\s+', ' ', field_line)\n return fields_builder", "def recordToDict(self, record):\n fields = {}\n if record is not None:\n for field, value in record.fields.iteritems():\n\n # FIXME: need to sort out dealing with enormous groups; we\n # can ignore these when sending AMP responses because the\n # client will always fetch members via a members( ) AMP\n # command.\n if field.name in (u\"memberDNs\", u\"memberUIDs\"):\n continue\n\n valueType = record.service.fieldName.valueType(field)\n if valueType in (unicode, bool):\n fields[field.name] = value\n elif valueType is uuid.UUID:\n fields[field.name] = str(value)\n elif issubclass(valueType, (Names, NamedConstant)):\n fields[field.name] = value.name if value else None\n return fields", "def get(self) -> dict:\n return parse_fields(raw=self._get().document_meta)", "def _get_tracked_fields(self, updated_fields):\n tracked_fields = []\n for name, field in self._fields.items():\n if getattr(field, 'string'):\n tracked_fields.append(name)\n\n if tracked_fields:\n return self.fields_get(tracked_fields)\n return {}", "def field_names(self):\n ...", "def _get_plugin_form_data(self, fields):\n form_data = {}\n for field, default_value in fields:\n try:\n form_data.update(\n {field: self.plugin_data.get(field, default_value)}\n )\n except Exception as err:\n logger.debug(\n \"Error in class %s. 
Details: %s\",\n self.__class__.__name__,\n str(err)\n )\n return form_data", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def to_dict(self) -> Dict[str, Any]:\n\n fields: Dict[str, Any] = {}\n if hasattr(self, \"url\"):\n fields[\"url\"] = self.url\n if hasattr(self, \"last_check\"):\n fields[\"last_check\"] = self.last_check\n if hasattr(self, \"param1\"):\n fields[\"param1\"] = self.param1\n return fields", "def fields(self):\r\n pass", "def f(self):\r\n return self.fields()", "def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n fields: Dict[str, Callable[[Any], None]] = {\n \"allowedToCreateApps\": lambda n : setattr(self, 'allowed_to_create_apps', n.get_bool_value()),\n \"allowedToCreateSecurityGroups\": lambda n : setattr(self, 'allowed_to_create_security_groups', n.get_bool_value()),\n \"allowedToCreateTenants\": lambda n : setattr(self, 'allowed_to_create_tenants', n.get_bool_value()),\n \"allowedToReadBitlockerKeysForOwnedDevice\": lambda n : setattr(self, 'allowed_to_read_bitlocker_keys_for_owned_device', n.get_bool_value()),\n \"allowedToReadOtherUsers\": lambda n : setattr(self, 'allowed_to_read_other_users', n.get_bool_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"permissionGrantPoliciesAssigned\": lambda n : setattr(self, 'permission_grant_policies_assigned', n.get_collection_of_primitive_values(str)),\n }\n return fields", "def audit_fields(elem, fields):\r\n errs = []\r\n parsed = {}\r\n for field, field_type, dict_field in fields:\r\n if field not in elem.attrib:\r\n errs.append(('missing value', field))\r\n else:\r\n value = ensure_type(elem.get(field), field_type)\r\n if not value:\r\n errs.append(('wrong type', field))\r\n else:\r\n parsed[dict_field] = value\r\n \r\n if errs:\r\n parsed = None\r\n return parsed, errs", "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'created':\n _dict[f.name] = str(f.value_from_object(self))\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "def get_dict_from_db(key, fields=[]):\n dataStr = get_from_db(key=key)\n dataObj = json.loads(dataStr)\n if not fields:\n return dataObj\n else:\n return {field: dataObj[field] for field in fields}", "def get_form_fields(infile):\n infile = PdfFileReader(open(infile, 'rb'))\n fields = _getFields(infile)\n return OrderedDict((k, v.get('/V', '')) for k, v in fields.items())", "def to_dict(self):\n dct = dict(zip(self._fields, self))\n dct['type'] = type(self).__name__\n return dct", "def fields_dict(slist, type=SList):\n fields = slist.fields()\n names = fields.pop(0)\n out = collections.OrderedDict()\n for i, name in enumerate(names[:-1]):\n out[name] = type(slist.fields(i)[1:])\n out[names[-1]] = type([' '.join(f[i + 1:]) for f in fields])\n return out", "def _get_field_enum_info(cls):\n return {}", "def parse_fields(self, request, fields=None, skip=set(), additional=[]):\n fields = fields or self.fields\n fields = [f for f in fields if f.name not in skip]\n fields.extend(additional)\n result = dict()\n for field in fields:\n try:\n result[field.name] = field.get_value(request, self)\n except ValueError, msg:\n raise HTTP_BAD_REQUEST(str(msg))\n return result", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .identity_provider_base import IdentityProviderBase\n\n from .identity_provider_base import 
IdentityProviderBase\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"certificateData\": lambda n : setattr(self, 'certificate_data', n.get_str_value()),\n \"developerId\": lambda n : setattr(self, 'developer_id', n.get_str_value()),\n \"keyId\": lambda n : setattr(self, 'key_id', n.get_str_value()),\n \"serviceId\": lambda n : setattr(self, 'service_id', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .artifact import Artifact\n from .host import Host\n\n from .artifact import Artifact\n from .host import Host\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"domain\": lambda n : setattr(self, 'domain', n.get_str_value()),\n \"firstSeenDateTime\": lambda n : setattr(self, 'first_seen_date_time', n.get_datetime_value()),\n \"host\": lambda n : setattr(self, 'host', n.get_object_value(Host)),\n \"lastSeenDateTime\": lambda n : setattr(self, 'last_seen_date_time', n.get_datetime_value()),\n \"name\": lambda n : setattr(self, 'name', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def modelfields(entity) -> Dict[str, Field]:\n return entity.__modelfields__", "def field_names(self):\n return self.base_field_names() + list(self.data.keys())", "def Fields(self):\n return self._fields", "def _all_fields_all_data():\n # Takes all name fields\n all_fields = PhotoTech.objects.all().values()[0].keys()\n # For all fileds takes all fields data \n all_data = [PhotoView._all_data_fields(x) for x in all_fields]\n allowed_search_fields = ['zoom',\n 'matrix_resol',\n 'color',\n 'matrix_size',\n 'country']\n # Return dict {keys: fields}\n return {x: y for x, y in zip(all_fields, all_data)\n if x in allowed_search_fields}", "def get_fields():\n return jsonify(result=Tree.fields())", "def get_field_names(self):\n return self._keys", "def get_fields(self):\n\n fields = {}\n LOGGER.debug('Treating all columns as string types')\n if os.path.exists(self.data):\n with open(self.data) as src:\n data = json.loads(src.read())\n for key, value in data['features'][0]['properties'].items():\n if isinstance(value, float):\n type_ = 'number'\n elif isinstance(value, int):\n type_ = 'integer'\n else:\n type_ = 'string'\n\n fields[key] = {'type': type_}\n else:\n LOGGER.warning(f'File {self.data} does not exist.')\n return fields", "def serialize(self): \n \n ret = {}\n\n for (name, field) in inst._fields:\n ret[name] = field.serialze(self, type(self))\n\n return ret", "def db_fields(self):", "def iter_fields(fields):\n if isinstance(fields, dict):\n return ((k, v) for k, v in dict.iteritems(fields))\n\n return ((k, v) for k, v in fields)", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def _get_simple_fields(cls) -> dict:\n return {\n name: tp\n for name, tp in cls._annotations().items()\n if AnnotationWrapper(tp).is_simple_in_opt_and_not_opt\n }", "def get_fields(self):\n return self._devices.keys()", "def to_dict(self):\n fields = {}\n for label in self.fields:\n field = getattr(self, label)\n if not field is None:\n fields[label] = field.url\n return fields", "def _get_fields(self, xsession, freq, subject_id=None,\n visit_id=None, derived=False):\n fields = []\n for name, value in xsession.fields.items():\n fields.append(Field(\n name=name, value=value, derived=derived,\n 
frequency=freq, subject_id=subject_id,\n visit_id=visit_id, archive=self))\n return sorted(fields)", "def getDict(self):\n res = {}\n for attr, value in self.__dict__.iteritems():\n if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n res[attr] = value\n elif isinstance(attr, datetime.datetime):\n res[attr] = value.isoformat('-')\n \n return res", "def _GetFieldValues(\n self, output_mediator, event, event_data, event_data_stream, event_tag):\n return {}", "def _list_fields(self):\n return list(self._state.keys())", "def model_fields(self):\n converter = connections[self.db].introspection.identifier_converter\n model_fields = {}\n for field in self.model._meta.fields:\n name, column = field.get_attname_column()\n model_fields[converter(column)] = field\n return model_fields", "def getSubfield(h, field, Lsubs):\n\td = {}\t\n\tfor k in Lsubs:\n\t\ttheElement = h.documentElement.getElementsByTagName(field)[0]\n\t\td[k] = str(theElement.getAttribute(k))\n\treturn d", "def get_fields(form, args):\n bound_fields = {}\n fields = []\n for field in form:\n bound_fields.update({field.name: field})\n for field_name in args.split(','):\n if field_name in bound_fields:\n fields.append(bound_fields[field_name])\n return fields" ]
[ "0.7502895", "0.7173949", "0.69838166", "0.6890921", "0.68630266", "0.6766923", "0.67547655", "0.673777", "0.673483", "0.67211217", "0.6610475", "0.6577992", "0.65449065", "0.6493254", "0.6487551", "0.6482813", "0.6455735", "0.6454793", "0.6442877", "0.64390624", "0.6432849", "0.64240086", "0.6418225", "0.6386792", "0.63575774", "0.6355731", "0.6348506", "0.6332452", "0.63318396", "0.6324723", "0.6321807", "0.6316707", "0.63103276", "0.63102597", "0.6301188", "0.6300142", "0.62898785", "0.6263602", "0.6254765", "0.6251452", "0.6249643", "0.62402195", "0.6234878", "0.62175626", "0.62123406", "0.62079906", "0.6199083", "0.61880994", "0.6183913", "0.61831385", "0.61820775", "0.61488724", "0.61355007", "0.61337656", "0.6126234", "0.6122585", "0.61204684", "0.6095863", "0.6084397", "0.6082675", "0.6074319", "0.6070781", "0.60692215", "0.6061221", "0.6061221", "0.60600966", "0.6052149", "0.6045156", "0.60399306", "0.6028127", "0.6027836", "0.6016704", "0.6011181", "0.59986573", "0.59963304", "0.59852105", "0.59851104", "0.5983441", "0.5974599", "0.5967238", "0.5947439", "0.59384316", "0.5936769", "0.5936658", "0.59350044", "0.5933479", "0.5933185", "0.5932491", "0.5929872", "0.59256566", "0.59171236", "0.59169096", "0.5912795", "0.5907422", "0.5886821", "0.58851343", "0.58774155", "0.5874746", "0.5871836", "0.5865984", "0.5851884" ]
0.0
-1
returns Post or Comment instance
def from_tx(cls, txcomment, session=None, **kwargs): if txcomment.is_comment: obj_cls = Comment cls_name = 'Comment' elif txcomment.is_post: obj_cls = Post cls_name = 'Post' else: raise ValueError('txcomment must by either post or comment') prepared = cls.prepare_from_tx(txcomment, session=session, **kwargs) logger.debug('%s.add: tx: %s prepared:%s', cls_name, txcomment, prepared) return obj_cls(**prepared)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_post(self):\n post_pk = self.kwargs.get('post_pk', 0)\n return get_object_or_404(Post, pk=post_pk)", "def _get_post(self):\n return self.get_object().content_object", "def get_post(self):\n\t\tself.post = graph.get_object(POST_ID)", "def get_post(post_pk):\n where = \"WHERE pk = ?\"\n values = (post_pk, )\n return Post.select_one(where, values)", "def get_object(self, id):\n try:\n return Post.objects.get(id=id)\n except Post.DoesNotExist:\n raise Http404", "def first_post(self):\r\n try:\r\n return self.post_set.all()[0]\r\n except IndexError:\r\n return None", "def get_model(cls):\n return Comment", "def parent(self) -> Comment | praw.models.Submission:\n # pylint: disable=no-member\n if self.parent_id == self.submission.fullname:\n return self.submission\n\n if self.parent_id in self.submission._comments_by_id:\n # The Comment already exists, so simply return it\n return self.submission._comments_by_id[self.parent_id]\n # pylint: enable=no-member\n\n parent = Comment(self._reddit, self.parent_id.split(\"_\", 1)[1])\n parent._submission = self.submission\n return parent", "def get(self, title):\n post = get_a_post(title)\n if not post:\n api.abort(404)\n else:\n return post", "def get_or_new_post(m, wp, doc):\n\n post = find_post(wp, doc.identifier)\n\n if post:\n action = lambda post: EditPost(post.id, post)\n else:\n prt(f\"Creating new post; could not find identifier '{doc.identifier}' \")\n action = lambda post: NewPost(post)\n post = WordPressPost()\n\n set_custom_field(post, 'identifier', doc.identifier)\n set_custom_field(post, 'name', doc.name)\n set_custom_field(post, 'nvname', doc.nonver_name)\n\n if not m.args.no_op:\n\n r = wp.call(action(post))\n wp = get_wp(m)\n for i in range(4):\n post = find_post(wp, doc.identifier)\n\n if post is not None:\n break\n print(\"HERE!\", post)\n sleep(1)\n else:\n err(\"Could not find post after creating it\", r, post)\n\n return post", "def get_post_or_page(slug=None, id=None):\n if id:\n try:\n return Post.objects.public().get(id=id)\n except Post.DoesNotExist:\n pass\n\n elif slug:\n try:\n return Post.objects.public().get(slug=slug)\n except Post.DoesNotExist:\n pass\n\n return ''", "def get_object(self, id):\n try:\n return Comment.objects.get(id=id)\n except Comment.DoesNotExist:\n raise Http404", "def latest_post(self):\r\n try:\r\n return self.post_set.latest('post_date')\r\n except Post.DoesNotExist:\r\n return None", "def get_content_object(self, **kwargs):\r\n return self.content_object", "def object(self):\n if not self.initial.get('content_type'):\n return None\n if not self.initial.get('object_id'):\n return None\n return self.initial.get('content_type').get_object_for_this_type(\n pk=self.initial.get('object_id')\n )", "def load_post_by_permalink(self, permalink):\n post = None\n posts = self.session.query(Post).filter(Post.permalink == permalink).all()\n if len(posts) > 0:\n post = posts[0]\n return post", "def get_post_view(self, instance):\n \n return instance.postview.view", "def specific(self):\n\n specific_type = ContentType.objects.get_for_id(self.specific_type_id)\n model_class = specific_type.model_class()\n if model_class is None:\n return self\n elif isinstance(self, model_class):\n return self\n else:\n return specific_type.get_object_for_this_type(id=self.id)", "def get_comment_object(self, site_id=None):\n if not self.is_valid():\n raise ValueError(\"get_comment_object may only be called on valid forms\")\n\n CommentModel = self.get_comment_model()\n new = 
CommentModel(**self.get_comment_create_data(site_id=site_id))\n new = self.check_for_duplicate_comment(new)\n\n return new", "def get_permisson_object(self):\n return self.blog", "def get_permisson_object(self):\n return self.blog", "def get_permisson_object(self):\n return self.blog", "def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, question=self.kwargs['pk'])\n self.check_object_permissions(self.request, obj)\n return obj", "def dangerously_get_post(post_id: str):\n return Post.objects.get(eid=post_id)", "def comment_to_object(self, comment, post_author_id=None):\n # the message_tags field is different in comment vs post. in post, it's a\n # dict of lists, in comment it's just a list. so, convert it to post style\n # here before running post_to_object().\n comment = dict(comment)\n comment['message_tags'] = {'1': comment.get('message_tags', [])}\n\n obj = self.post_to_object(comment)\n if not obj:\n return obj\n\n obj['objectType'] = 'comment'\n\n match = self.COMMENT_ID_RE.match(comment.get('id', ''))\n if match:\n post_author, post_id, comment_id = match.groups()\n obj['url'] = self.comment_url(post_id, comment_id,\n post_author_id=post_author_id)\n obj['inReplyTo'] = [{'id': self.tag_uri(post_id)}]\n\n return self.postprocess_object(obj)", "def get_post(id, check_author=True):\r\n cur = get_db().cursor()\r\n cur.execute(\r\n 'SELECT p.id, title, body, created, author_id, username'\r\n ' FROM novel.post p JOIN novel.user u ON p.author_id = u.id'\r\n ' WHERE p.id = %s',id )\r\n\r\n post = cur.fetchone()\r\n if post is None:\r\n abort(404, \"Post id {0} doesn't exist.\".format(id))\r\n\r\n if check_author and post['author_id'] != g.user['id']:\r\n abort(403)\r\n\r\n return post", "def get(cls, *args, **kwargs) -> object or None:\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n # if objects does not exist, we use None\n return None", "def create_post_with_comment(pauthor, cauthor, visibility, ptext, ctext):\n\n post = Post.objects.create(content = ptext, author = pauthor, visibility=visibility)\n comment = Comment.objects.create(comment = ctext, post = post, author = cauthor)\n return (post, comment)", "def get_by_id(data_base, id, commit_to_db=True):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT * FROM post WHERE id = {id}\")\n fields = cursor.fetchone()\n cursor.close()\n if commit_to_db:\n fields['commit_to_db'] = commit_to_db\n try:\n return Post(**fields)\n except TypeError:\n return", "def get_instance(cls, *args, **kwargs):\n if cls._instance is not None:\n return cls._instance\n return cls(*args, **kwargs)", "def get_object(self):\n if getattr(self, 'current_instance', None):\n ret = self.current_instance\n else:\n ret = super().get_object()\n return ret", "def get_question(self):\n if self.is_question:\n return self\n elif self.is_answer or self.is_comment and self.parent.is_question:\n return self.parent\n elif self.is_comment and self.parent.is_answer:\n return self.parent.parent\n raise IntegrityError('Content object {} is orphan.'.format(self.pk))", "def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(CommentCreate, self).get_context_data(**kwargs)\n # Get the blog from id and add it to the context\n context['post'] = get_object_or_404(Post, pk = self.kwargs['pk'])\n return context", "def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = 
super(PostCommentCreate, self).get_context_data(**kwargs)\n # Get the blog from id and add it to the context\n context['post'] = get_object_or_404(Post, pk=self.kwargs['pk'])\n return context", "def getRedditInstance():\r\n\r\n return praw.Reddit(client_id=REDDIT_CLIENT_ID,\r\n client_secret=REDDIT_CLIENT_SECRET,\r\n user_agent=REDDIT_USER_AGENT)", "def get_content_object(self):\r\n return self.content_object", "def get_comment_model(self):\n return get_model()", "def get_object(self):\n queryset = self.get_queryset()\n\n model = self.get_model()\n obj = queryset.get(get_primary_keys(model, self.kwargs))\n\n if not obj:\n raise Http404('No %s matches the given query.' % model.__name__)\n\n return obj", "def get_post_model():\n from django.conf import settings\n\n try:\n from django.apps import apps\n get_model = apps.get_model\n except ImportError:\n from django.db.models import get_model\n\n try:\n POST_MODEL = getattr(settings, 'STARDATE_POST_MODEL')\n except AttributeError:\n raise NotImplementedError('STARDATE_POST_MODEL is not defined.')\n\n try:\n app_label, model_name = POST_MODEL.split('.')\n except ValueError:\n raise ImproperlyConfigured(\"STARDATE_POST_MODEL must be of the form 'app_label.model_name'\")\n\n post_model = get_model(app_label, model_name)\n if post_model is None:\n raise ImproperlyConfigured(\"STARDATE_POST_MODEL refers to model '%s' that has not been installed\" % POST_MODEL)\n return post_model", "def load_post_by_id(self, id):\n post = None\n posts = self.session.query(Post).filter(Post.id == id).all()\n if len(posts) > 0:\n post = posts[0]\n return post", "def get_object(self):\n obj = get_object_or_404(Article, slug=self.kwargs[\"slug\"])\n self.check_object_permissions(self.request, obj)\n return obj", "def get_object(self, pid=None, type=None, create=None):\n objtype = type or self.default_object_type\n\n if pid is None:\n if create is None:\n create = True\n else:\n if create is None:\n create = False\n\n return objtype(self.api, pid, create,\n default_pidspace=self.default_pidspace)", "def get_reply(self, parent, child):\n try:\n reply = CommentReply.objects.get(\n pk=child,\n comment_to=parent)\n except CommentReply.DoesNotExist:\n raise exceptions.NotFound(\n f'Comment reply of ID {child} nonexistent'\n )\n\n return reply", "def get_instance(self):\n return {\n 'id': self.id,\n 'text': [self.text],\n 'media': self.media,\n 'quick_replies': self.quick_replies,\n 'next': self.next_nodes,\n 'pre': self.id,\n 'attribute': self.attribute\n }", "def get_object(self):\n return self._object", "def get_model(self):\n\t\treturn self.object.__class__", "def instance(self):\n return self.__instance", "def get_object(self):\n pk = self.kwargs.get('id')\n return get_object_or_404(Book, pk=pk)", "def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item", "def _get_instance(identifier):\n # noinspection PyBroadException\n try:\n app_label, model, object_pk = identifier.split('.', maxsplit=2)\n # we don't expect to find anything, so don't log\n if object_pk != 'None':\n if object_pk == OBJECT_DOES_NOT_EXIST:\n raise ObjectDoesNotExist()\n content_type = ContentType.objects.get_by_natural_key(app_label, model)\n return content_type.get_object_for_this_type(pk=object_pk)\n except ContentType.DoesNotExist:\n logging.warning(f'Could not find content type for {identifier!r}')\n except ObjectDoesNotExist:\n logging.warning(f'Could not find related object for {identifier!r}')\n except DatabaseError: # don't 
mask these\n raise\n except Exception:\n logging.exception(f'Could not get related object for {identifier!r}', log_function=logging.error)", "def retrieve(self, request, pk=None):\n\n\n \n\n\n try:\n # `pk` is a parameter to this function, and\n # Django parses it from the URL route parameter\n # http://localhost:8000/Posts/2\n #\n # The `2` at the end of the route becomes `pk`\n post = Post.objects.get(pk=pk)\n reactions = Reaction.objects.all()\n\n # Creates an empty list for reactions custom property set in model, and then filters through postReactions to provide objects with a\n # key/value pair of reaction label/number of that reaction the post has \n\n post.reactions=[]\n\n for reaction in reactions:\n number_of_reactions = PostReaction.objects.filter(post=post, reaction=reaction).count()\n post.reactions.append({reaction.label: number_of_reactions})\n\n associated_tags=Tag.objects.filter(related_post__post=post)\n user = RareUser.objects.get(user=request.auth.user)\n\n all_tags=serializer=TagSerializer(associated_tags, many=True, context={'request',request})\n my_post=serializer = PostSerializer(post, context={'request': request})\n \n single_post={}\n single_post['post']=my_post.data\n single_post['tags']=all_tags.data\n if user == post.user:\n single_post['myPosts']=True \n\n return Response(single_post)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def get_instance(self, instance):\n return self._get(_instance.Instance, instance)", "def get(self, id):\t\t\n\t\ttry:\n\t\t\treturn post_service.get(id)\n\t\texcept AssertionError as e:\n\t\t\tpost_space.abort(400, e.args[0], status = \"Could not get post\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tpost_space.abort(500, e.args[0], status = \"Could not get post\", statusCode = \"500\")", "def get(self, *args, **kwargs):\n post_pk = self.kwargs['post_pk']\n post = get_object_or_404(Post, pk=post_pk)\n comments = self.queryset.filter(post=post.pk)\n serializer = self.serializer_class(comments, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get_object(self):\n # read the URL data values into variables\n astronaut_pk = self.kwargs['astronaut_pk']\n message_pk = self.kwargs['message_pk']\n\n # find the SendMessage object, and return it\n st_cfh = SendMessage.objects.get(pk=message_pk)\n return st_cfh", "def get_or_create(self, **kwargs):\n kwargs = self._preprocess(**kwargs)\n found = self.first(**kwargs)\n if found is not None:\n return found\n\n new = self.create(**kwargs)\n return new", "def get(self, post_id=None):\n\n if post_id:\n post = Post.query.filter_by(id=post_id).first()\n if not post:\n abort(404)\n return post\n else:\n args = parsers.post_get_parser.parse_args()\n page = args['page'] or 1\n\n # Return the posts with user.\n if args['user']:\n user = User.query.filter_by(username=args['user']).first()\n if not user:\n abort(404)\n posts = user.posts.order_by(\n Post.publish_date.desc()).paginate(page, 30)\n # Return the posts\n else:\n posts = Post.query.order_by(\n Post.publish_date.desc()).paginate(page, 30)\n\n return posts.items", "def get_object(self, id=None):\n if id is None and self.kwargs.get('field') == 'id':\n id = self.kwargs.get('constraint')\n self.object = self.get_model_obj().objects.get(pk=id)\n return self.object", "def get_post(post_id, check_author=True):\n post = DB.session.query(Post).get(post_id)\n\n # Caso não exista um post com este ID, Erro 404\n if post is None:\n abort(404, \"Post id {0} doesn't exist.\".format(post_id))\n\n # 
Caso o usuário não é o mesmo que publicou originalmente o post, Erro 403\n if check_author and post.author != g.user:\n abort(403)\n # abort -> levanta uma exceção especial que retorna um código de status HTTP.\n # É necessário um argumento opcional para vir com o erro, senão vem uma mensagem\n # padrão\n # mais em: https://flask.palletsprojects.com/en/1.1.x/api/#flask.abort\n\n return post", "def get_object(self):\n if not self.user.is_authenticated():\n raise Http404('Access denied')\n self.url_name = self.request.resolver_match.url_name\n if self.url_name == 'sticker-detail':\n return Sticker.objects.get(\n board__desk__owner__user=self.user,\n board__prefix=self.kwargs['prefix'],\n sequence=self.kwargs['sequence']\n )\n elif self.url_name == 'board-comments':\n return Board.objects.get(\n desk__owner__user=self.user,\n sequence=self.kwargs['board_sequence']\n )\n elif self.url_name == 'sprint-comments':\n return Sprint.objects.get(\n number=self.kwargs['sprint_number'],\n board__desk__owner__user=self.user,\n board__sequence=self.kwargs['board_sequence']\n )", "def get(self, id):\n if id == 'body':\n return document.body\n else:\n return self.instances[id]", "async def get_one(self, where: t.Mapping[str, t.Any]) -> t.Optional[Model]:\n\n data = await self.collection.find_one(where)\n return self.model_class(**data) if data else None", "def as_real_class(self):\n model = self.content_type.model_class()\n if model == Defense:\n return self\n return model.objects.get(id=self.id)", "def save_post(self, post):\n return self.collection.insert_one(post.serialize())", "def get_post(self, postid):\n return self.execute('metaWeblog.getPost', postid, self.username, self.password)", "def instance(self):\n return self._instance", "def delete_post(self: User, post_id: str) -> Optional[Post]:\n post = dangerously_get_post(post_id)\n if self != post.author:\n raise UnauthorizedAccess()\n\n # do not nullify the user to keep consistent with the below implementation\n nullify_notifications(post.make_href(), post.author)\n\n # we do not nullify the author in database for a post\n # so that a \"skeleton\" is left on home and profile\n post.content = ''\n post.deleted = True\n post.reshareable = False\n post.media_list = []\n # TODO: remove poll both on here and on polls collection\n post.save()\n\n if exists_in_post_cache(post.id):\n # only set in post cache if it already exists\n # post cache should only have reshared posts so it should not cache any deleted post\n set_in_post_cache(post)\n\n return post", "def get_stream(self):\n return Post.select().where(\n (Post.user == self)\n )", "def create_comment(post, author, content):\n return Comment.objects.create(post=post, author=author, content=content)", "def get_object(self):\n # return Person.objects.get(user=self.request.user)\n p,c = Person.objects.get_or_create(user=self.request.user)\n return p", "def get_single_post(request):\n if request.method == \"POST\":\n if \"token\" in request.data and request.data[\"token\"] != \"\" and request.data[\"token\"] is not None:\n if Token.objects.filter(key=request.data[\"token\"]).exists():\n if Post.objects.filter(pk=request.data[\"post_id\"]).exists():\n token = get_object_or_404(Token, key=request.data[\"token\"])\n post = Post.objects.get(pk=request.data[\"post_id\"])\n serializer = PostSerializer(post, context={'user_id': token.user_id})\n return Response({\"success\": 87,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 88})\n else:\n return Response({\"error\": 17})", "def 
get_version(self, version):\n\n if self.version == version:\n return self\n\n #version_slug = self.get_version_slug(version)\n\n parent_version_id = self.get_parent_version().id\n\n try:\n versioned_post = Post.objects.get(blog=self.blog, version_id=parent_version_id, version=version)\n return versioned_post\n except Post.DoesNotExist:\n versioned_post = Post(version_id=parent_version_id, published=False, blog=self.blog, version=version)\n versioned_post.save()\n\n copy_attribs = ['title',\n 'tags_text',\n 'content',\n 'content_markup_type',\n 'allow_comments',\n 'published',\n 'display_time',\n 'slug']\n\n for attrib in copy_attribs:\n setattr(versioned_post, attrib, getattr(self, attrib))\n versioned_post.save()\n\n return versioned_post", "def get_object_or_child_by_type(self, *types):\n\n objects = self.get_objects_or_children_by_type(*types)\n return objects[0] if any(objects) else None", "def single(self):\r\n return single.Single(self)", "def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')", "def obj_get(self, request=None, **kwargs):\n return Document(self.get_collection(request).find_one({\n \"_id\": ObjectId(kwargs.get(\"pk\"))\n }))", "def get_content_object_by_model(self, model_or_instance):\n session_element = self.get_elements_by_model(model_or_instance).order_by('-created_at').first()\n if not session_element:\n return None\n\n return session_element.content_object", "def get_blog_from_ID(cls, blog_id): #we won't have access to this 'self' unless we create an object first\n blog_dict = Database.find_one(collection='blogs',\n query={'blog_id':blog_id})\n return Blog.__dict_to_class(blog_dict)\n # return cls(author = blog_dict['author'],\n # title = blog_dict['title'],\n # description = blog_dict['description'],\n # blog_id = blog_dict['blog_id']\n # )", "def post_to_object(self, post, remove_id_prefix=False):\n id = post.get('id')\n if not id:\n return {}\n\n post_type = post.get('type')\n status_type = post.get('status_type')\n url = self.post_url(post)\n picture = post.get('picture')\n display_name = None\n message = (post.get('message') or post.get('story') or\n post.get('description') or post.get('name'))\n\n data = post.get('data', {})\n for field in ('object', 'song'):\n obj = data.get(field)\n if obj:\n id = obj.get('id')\n post_type = obj.get('type')\n url = obj.get('url')\n display_name = obj.get('title')\n\n object_type = OBJECT_TYPES.get(post_type)\n author = self.user_to_actor(post.get('from'))\n link = post.get('link', '')\n\n if link.startswith('/gifts/'):\n object_type = 'product'\n if not object_type:\n if picture and not message:\n object_type = 'image'\n else:\n object_type = 'note'\n\n obj = {\n 'id': self.tag_uri(str(id)),\n 'objectType': object_type,\n 'published': util.maybe_iso8601_to_rfc3339(post.get('created_time')),\n 'updated': util.maybe_iso8601_to_rfc3339(post.get('updated_time')),\n 'author': author,\n 'content': message,\n # FB post ids are of the form USERID_POSTID\n 'url': url,\n 'image': {'url': picture},\n 'displayName': display_name,\n 'fb_object_id': post.get('object_id'),\n }\n\n privacy = post.get('privacy', {})\n if isinstance(privacy, dict):\n privacy = privacy.get('value')\n if privacy is not None:\n # privacy value '' means it doesn't 
have an explicit audience set, so i\n # *think* it inherits from its parent. TODO: use that value as opposed to\n # defaulting to public.\n public = privacy.lower() in ('', 'everyone', 'open')\n obj['to'] = [{'objectType': 'group',\n 'alias': '@public' if public else '@private'}]\n\n # tags and likes\n tags = itertools.chain(post.get('to', {}).get('data', []),\n post.get('with_tags', {}).get('data', []),\n *post.get('message_tags', {}).values())\n obj['tags'] = [self.postprocess_object({\n 'objectType': OBJECT_TYPES.get(t.get('type'), 'person'),\n 'id': self.tag_uri(t.get('id')),\n 'url': self.object_url(t.get('id')),\n 'displayName': t.get('name'),\n 'startIndex': t.get('offset'),\n 'length': t.get('length'),\n }) for t in tags]\n\n obj['tags'] += [self.postprocess_object({\n 'id': self.tag_uri('%s_liked_by_%s' % (id, like.get('id'))),\n 'url': url,\n 'objectType': 'activity',\n 'verb': 'like',\n 'object': {'url': url},\n 'author': self.user_to_actor(like),\n 'content': 'likes this.',\n }) for like in post.get('likes', {}).get('data', [])]\n\n # \"See Original\" links\n post_actions = post.get('actions',[])\n see_orig_actions = (act for act in post_actions\n if act.get('name', '').lower() in SEE_ORIGINAL_ACTIONS)\n obj['tags'] += [self.postprocess_object({\n 'objectType': 'article',\n 'url': act.get('link'),\n 'displayName': act.get('name')\n }) for act in see_orig_actions]\n\n # is there an attachment? prefer to represent it as a picture (ie image\n # object), but if not, fall back to a link.\n att = {\n 'url': link if link else url,\n 'image': {'url': picture},\n 'displayName': post.get('name'),\n 'summary': post.get('caption'),\n 'content': post.get('description'),\n }\n\n if (picture and picture.endswith('_s.jpg') and\n (post_type == 'photo' or status_type == 'added_photos')):\n # a picture the user posted. get a larger size.\n att.update({\n 'objectType': 'image',\n 'image': {'url': picture[:-6] + '_o.jpg'},\n })\n obj['attachments'] = [att]\n elif link and not link.startswith('/gifts/'):\n att['objectType'] = 'article'\n obj['attachments'] = [att]\n\n # location\n place = post.get('place')\n if place:\n id = place.get('id')\n obj['location'] = {\n 'displayName': place.get('name'),\n 'id': id,\n 'url': self.object_url(id),\n }\n location = place.get('location', None)\n if isinstance(location, dict):\n lat = location.get('latitude')\n lon = location.get('longitude')\n if lat and lon:\n obj['location'].update({\n 'latitude': lat,\n 'longitude': lon,\n # ISO 6709 location string. 
details: http://en.wikipedia.org/wiki/ISO_6709\n 'position': '%+f%+f/' % (lat, lon),\n })\n elif 'location' in post:\n obj['location'] = {'displayName': post['location']}\n\n # comments go in the replies field, according to the \"Responses for\n # Activity Streams\" extension spec:\n # http://activitystrea.ms/specs/json/replies/1.0/\n comments = post.get('comments', {}).get('data')\n if comments:\n items = [self.comment_to_object(c) for c in comments]\n obj['replies'] = {\n 'items': items,\n 'totalItems': len(items),\n }\n\n return self.postprocess_object(obj)", "def get_instance(self):\n try:\n return self._instance\n except AttributeError:\n self._instance = self._decorated()\n return self._instance", "def _get_instance(self):", "def _get_instance(self):", "def get(cls):\n return cls.instance", "def getSubmission(permalink, reddit):\r\n\r\n submissionUrl = REDDIT_URL_PREFIX + permalink\r\n submission = reddit.submission(url=submissionUrl)\r\n return submission", "def get(self, id):\n post = Post.query.filter_by(id=id).first()\n if post is None:\n return { 'message': 'Post does not exist'}, 404\n\n return post_schema.dump(post)", "def get(self, post_id):\n key = db.Key.from_path('Posts', int(post_id))\n post = db.get(key)\n user = self.get_active_user()\n edit_id = self.request.get('edit')\n delete_id = self.request.get('delete')\n\n if not post:\n self.error(404)\n return\n\n comments = db.GqlQuery(\"\"\"select * from Comments where\n post_id = %s\"\"\" % str(post.key().id()))\n if not user:\n self.render_permalink(post=post, comments=comments)\n return\n if delete_id:\n comment = Comments.get_by_id(int(delete_id))\n comment.delete()\n\n owned_by_user = int(user.key().id()) == post.submitter_id\n likes = int(user.key().id()) in post.liked_by\n self.render_permalink(user=user,\n post=post,\n comments=comments,\n edit_id=edit_id,\n owns=owned_by_user)", "def one(self):\n try:\n return self[0]\n except IndexError:\n raise self.document.DoesNotExist", "def create_comment_immediately_below_post():\n post = create_a_post()\n comment = Comment.create(post=post, body=\"I'm a comment right below a post\")\n comment.save()\n return comment", "def __repr__(self):\n\t\treturn \"<Post #{}: {}>\".format(self.id, self.content)", "def get_article(self, slug):\n\t\tarticle = Blog.objects.get(slug=slug)\n\t\treturn article", "def get_object ( self, object ):\n return object", "def load_post_with_comments_by_permalink(self, permalink):\n post = self.load_post_by_permalink(permalink)\n if post:\n comments = self.load_comments_by_post_id(post.id)\n post.comments = comments\n return post", "def render_post(self, post, **params):\n\n \"\"\"\n TODO\n To keep the code more object-oriented I wanted to have the\n render_comments() method inside the PostCommentsHandler() class.\n But then I get following error:\n AttributeError: 'NoneType' object has no attribute 'cookies'.\n Thats why I put render_comments into this PostHandler Class.\n \"\"\"\n # rendered_comments = PostCommentsHandler().render_comments()\n\n if \"comment_to_edit\" in params:\n rendered_comments = self.render_comments(\n post=post, comment_to_edit=params['comment_to_edit'])\n else:\n rendered_comments = self.render_comments(\n post=post, comment_to_edit=None)\n\n return self.render_str(\"blog/singlepost.html\",\n p=post,\n comments=rendered_comments)", "def get_object(self, url_id, user_id):\n try:\n return Link.objects.get(id=url_id, user=user_id)\n except Link.DoesNotExist:\n return None", "def get_object(self):\n lookup_url_kwarg = 
self.lookup_url_kwarg or self.lookup_field\n\n assert lookup_url_kwarg in self.kwargs, (\n 'Expected view %s to be called with a URL keyword argument '\n 'named \"%s\". Fix your URL conf, or set the `.lookup_field` '\n 'attribute on the view correctly.' %\n (self.__class__.__name__, lookup_url_kwarg)\n )\n\n filter_kwargs = {\n \"parent_id\": self.kwargs[\"boards_pk\"],\n self.lookup_field: self.kwargs[lookup_url_kwarg]\n }\n obj = get_object_or_404(self.get_queryset(), **filter_kwargs)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def getOrNone(cls, **kwargs):\n try:\n return cls.objects.get(**kwargs)\n except cls.DoesNotExist:\n return None", "def post(self):\n post_id = int(self.request.get('post_id'))\n post = Posts.get_by_id(post_id)\n comment = self.request.get('comment')\n submitter_id = self.get_active_user().key().id()\n\n if submitter_id:\n comment = Comments(post_id=post_id, content=comment,\n submitter_id=submitter_id)\n comment.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)", "def single(request, entry_pk=None, comment_form=None):\n\n entry = get_object_or_404(BlogEntry, pk=entry_pk)\n\n if not comment_form:\n comment_form = BlogCommentForm(creator=request.user, blog=entry)\n\n data = {'entry': entry, 'blog_info': get_blog_info(),\n 'comment_form': comment_form}\n return render_to_response('blog/single.html', data,\n context_instance=get_rq(request))", "def __call__(self, *args, **kwargs):\n if not self.instance:\n self.instance = super().__call__(*args, **kwargs)\n return self.instance" ]
[ "0.69633675", "0.66851956", "0.6488416", "0.6333677", "0.6118673", "0.6012395", "0.59859", "0.59783304", "0.5883995", "0.5803353", "0.5775314", "0.57464737", "0.5668173", "0.559327", "0.5543723", "0.5539568", "0.5487841", "0.5474023", "0.54665846", "0.54636973", "0.54636973", "0.54636973", "0.5441957", "0.5441595", "0.54411215", "0.5432069", "0.54180294", "0.5401476", "0.53904337", "0.5388696", "0.538525", "0.53608954", "0.5355667", "0.53514016", "0.5349564", "0.53346187", "0.5328859", "0.53187567", "0.53171355", "0.53091043", "0.529173", "0.52765393", "0.52755094", "0.5269531", "0.5259231", "0.52369153", "0.52350557", "0.52173555", "0.5217249", "0.5210394", "0.5199384", "0.51986814", "0.5194926", "0.5193043", "0.5190153", "0.5188403", "0.5183425", "0.51775503", "0.51724285", "0.5166925", "0.51626176", "0.5155937", "0.5148534", "0.5144395", "0.514401", "0.51427335", "0.51417136", "0.513737", "0.5112331", "0.51084644", "0.5106891", "0.51017785", "0.51015484", "0.51013094", "0.5097517", "0.50964886", "0.50944984", "0.5091432", "0.50900644", "0.5087463", "0.5073647", "0.5073647", "0.50709337", "0.50707275", "0.506839", "0.5060427", "0.5052046", "0.5050597", "0.50487083", "0.5041705", "0.5033676", "0.5033576", "0.50305295", "0.5026749", "0.50185156", "0.5011669", "0.5011669", "0.5003595", "0.50016165", "0.4999658", "0.49852306" ]
document_score: 0.0
document_rank: -1
query: returns unique Post or Comment instance
def as_unique_from_tx(cls, txcomment, session=None, **kwargs): prepared = cls.prepare_from_tx(txcomment, session=session, **kwargs) if txcomment.is_comment: obj_cls = Comment elif txcomment.is_post: obj_cls = Post else: raise ValueError('txcomment must by either post or comment') return obj_cls.as_unique(session, **prepared)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_post(self):\n post_pk = self.kwargs.get('post_pk', 0)\n return get_object_or_404(Post, pk=post_pk)", "def get_unique_instance(type_):\n global unique_object_id\n ret = None\n\n if type_ is tuple:\n ret = tuple([unique_object_id])\n\n unique_object_id += 1\n return ret", "def first_post(self):\r\n try:\r\n return self.post_set.all()[0]\r\n except IndexError:\r\n return None", "def get_post(post_pk):\n where = \"WHERE pk = ?\"\n values = (post_pk, )\n return Post.select_one(where, values)", "def get_post(self):\n\t\tself.post = graph.get_object(POST_ID)", "def get_instance(self, name):\n return self.website.instance.id", "def get_post_id(self):\n return self.key.parent().id()", "def get_comment_object(self, site_id=None):\n if not self.is_valid():\n raise ValueError(\"get_comment_object may only be called on valid forms\")\n\n CommentModel = self.get_comment_model()\n new = CommentModel(**self.get_comment_create_data(site_id=site_id))\n new = self.check_for_duplicate_comment(new)\n\n return new", "def _get_post(self):\n return self.get_object().content_object", "def load_post_by_permalink(self, permalink):\n post = None\n posts = self.session.query(Post).filter(Post.permalink == permalink).all()\n if len(posts) > 0:\n post = posts[0]\n return post", "def dangerously_get_post(post_id: str):\n return Post.objects.get(eid=post_id)", "def get_or_new_post(m, wp, doc):\n\n post = find_post(wp, doc.identifier)\n\n if post:\n action = lambda post: EditPost(post.id, post)\n else:\n prt(f\"Creating new post; could not find identifier '{doc.identifier}' \")\n action = lambda post: NewPost(post)\n post = WordPressPost()\n\n set_custom_field(post, 'identifier', doc.identifier)\n set_custom_field(post, 'name', doc.name)\n set_custom_field(post, 'nvname', doc.nonver_name)\n\n if not m.args.no_op:\n\n r = wp.call(action(post))\n wp = get_wp(m)\n for i in range(4):\n post = find_post(wp, doc.identifier)\n\n if post is not None:\n break\n print(\"HERE!\", post)\n sleep(1)\n else:\n err(\"Could not find post after creating it\", r, post)\n\n return post", "def parent(self) -> Comment | praw.models.Submission:\n # pylint: disable=no-member\n if self.parent_id == self.submission.fullname:\n return self.submission\n\n if self.parent_id in self.submission._comments_by_id:\n # The Comment already exists, so simply return it\n return self.submission._comments_by_id[self.parent_id]\n # pylint: enable=no-member\n\n parent = Comment(self._reddit, self.parent_id.split(\"_\", 1)[1])\n parent._submission = self.submission\n return parent", "def get_object(self, id):\n try:\n return Post.objects.get(id=id)\n except Post.DoesNotExist:\n raise Http404", "def one(self):\n try:\n return self[0]\n except IndexError:\n raise self.document.DoesNotExist", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", 
"def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def latest_post(self):\r\n try:\r\n return self.post_set.latest('post_date')\r\n except Post.DoesNotExist:\r\n return None", "def get_disqus_id(self):\n return '{}_post_{}'.format(self.pk, slugify(self.title))", "def get_or_create(self, **kwargs):\n kwargs = self._preprocess(**kwargs)\n found = self.first(**kwargs)\n if found is not None:\n return found\n\n new = self.create(**kwargs)\n return new", "def get_object(id):", "def _get_instance_id(self):\n return self.__instance_id", "def get_post_or_page(slug=None, id=None):\n if id:\n try:\n return Post.objects.public().get(id=id)\n except Post.DoesNotExist:\n pass\n\n elif slug:\n try:\n return Post.objects.public().get(slug=slug)\n except Post.DoesNotExist:\n pass\n\n return ''", "def get_instance(self, name):\n return self.store.instance.id", "def get_post_id_for_permalink(self, permalink):\n post_id = None\n post_synopses = self.session.query(PostSynopsis).filter(PostSynopsis.permalink == permalink).all()\n if len(post_synopses) > 0:\n post_id = post_synopses[0].id\n return post_id", "def get_object(self):\n # return Person.objects.get(user=self.request.user)\n p,c = Person.objects.get_or_create(user=self.request.user)\n return p", "def fetchone(self):\n row = self.cursor.fetchone()\n\n if row is None:\n return None\n\n if self.model.single:\n return self.__instance_from_db(self.model, row)\n else:\n return tuple(self.__instance_from_db(m, row) for m in self.model.models)", "def get_object(self,pk):\n # return Person.objects.get(user=self.request.user)\n p,c = Person.objects.get_or_create(user_id=pk)\n return p", "def get_post_id(self):\n return int(self.request.get('blog_id'))", "def save_current_post(entry):\n return current.insert_one(entry).inserted_id", "def _get_instance(identifier):\n # noinspection PyBroadException\n try:\n app_label, model, object_pk = identifier.split('.', maxsplit=2)\n # we don't expect to find anything, so don't log\n if object_pk != 'None':\n if object_pk == OBJECT_DOES_NOT_EXIST:\n raise ObjectDoesNotExist()\n content_type = ContentType.objects.get_by_natural_key(app_label, model)\n return content_type.get_object_for_this_type(pk=object_pk)\n except ContentType.DoesNotExist:\n logging.warning(f'Could not find content type for {identifier!r}')\n except ObjectDoesNotExist:\n logging.warning(f'Could not find related object for {identifier!r}')\n except DatabaseError: # don't mask these\n raise\n except Exception:\n logging.exception(f'Could not get related object for {identifier!r}', log_function=logging.error)", "def _get_id(self):\n return self.id", "async def _get_post_id(db, author, permlink):\n sql = \"SELECT id FROM hive_posts WHERE author = :a AND permlink = :p\"\n return await db.query_one(sql, a=author, p=permlink)", "def _object_get(self, pk):\n try:\n return self.model.objects.get(pk=pk)\n except 
self.model.DoesNotExist:\n raise DoesNotExist(self.model.__name__.lower(), primary_key=pk)", "def get_unique_node(name, node_type, meta_type):\n name = normalize_whitespace(name)\n node_handle = get_unique_node_handle(name, node_type, meta_type)\n node = node_handle.get_node()\n return node", "def single(self):\r\n return single.Single(self)", "def _unique(session, cls, queryfunc, constructor, kw, unique_key='name'):\n cache = getattr(session, '_unique_cache', None)\n if cache is None:\n session._unique_cache = cache = {}\n\n key = (cls, kw.get(unique_key))\n if key in cache:\n print(f'The {kw.get(unique_key)} {unique_key} is not unique, try something else!')\n return cache[key]\n else:\n with session.no_autoflush:\n q = session.query(cls)\n q = queryfunc(q, kw.get(unique_key))\n obj = q.first()\n if not obj:\n obj = constructor(**kw)\n session.add(obj)\n else:\n if unique_key == 'name':\n print(f'The {obj.name} {unique_key} is not unique, try something else!')\n else:\n print(f'The {obj.img_path} {unique_key} is not unique, try something else!')\n cache[key] = obj\n return obj", "def object(self):\n if not self.initial.get('content_type'):\n return None\n if not self.initial.get('object_id'):\n return None\n return self.initial.get('content_type').get_object_for_this_type(\n pk=self.initial.get('object_id')\n )", "async def get_post_id(db, author, permlink):\n sql = (\"SELECT id FROM hive_posts WHERE author = :a \"\n \"AND permlink = :p AND is_deleted = '0' LIMIT 1\")\n return await db.query_one(sql, a=author, p=permlink)", "def get_object(self, id):\n try:\n return Comment.objects.get(id=id)\n except Comment.DoesNotExist:\n raise Http404", "def getUniqueID(self):\n return self.unique_id", "def instance(self):\n return self.__instance", "def getId(self):\n # XXX-Aurel : this must be based on the GID definition\n # As GID in TioSafe case is unique, it must be used to get\n # the last ID of an inserted object (usefull for cases where\n # transactionnal operation is not provided like with prestashop)\n #raise ValueError, self.last_id\n return LastIdBrain.getId(self)", "def get_primary_id(self):", "def instance(self):\n return self._instance", "def save_post(self, post):\n return self.collection.insert_one(post.serialize())", "def unique_id(self):\n return self._id", "def unique_id(self):\n return self._id", "def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')", "def get_by_id(data_base, id, commit_to_db=True):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT * FROM post WHERE id = {id}\")\n fields = cursor.fetchone()\n cursor.close()\n if commit_to_db:\n fields['commit_to_db'] = commit_to_db\n try:\n return Post(**fields)\n except TypeError:\n return", "def get_object(self):\n # read the URL data values into variables\n astronaut_pk = self.kwargs['astronaut_pk']\n message_pk = self.kwargs['message_pk']\n\n # find the SendMessage object, and return it\n st_cfh = SendMessage.objects.get(pk=message_pk)\n return st_cfh", "def get_objectID(self):\n return self.collection.uuid", "def get_id(self):\n return self[\"_id\"]", "def get_single_post(request):\n if request.method == \"POST\":\n if \"token\" in request.data and request.data[\"token\"] != \"\" and request.data[\"token\"] 
is not None:\n if Token.objects.filter(key=request.data[\"token\"]).exists():\n if Post.objects.filter(pk=request.data[\"post_id\"]).exists():\n token = get_object_or_404(Token, key=request.data[\"token\"])\n post = Post.objects.get(pk=request.data[\"post_id\"])\n serializer = PostSerializer(post, context={'user_id': token.user_id})\n return Response({\"success\": 87,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 88})\n else:\n return Response({\"error\": 17})", "def get_post(self, pid):\n return self.posts.find_one_and_update({'Id':pid}, {'$inc': {'ViewCount':1}})", "def load_post_by_id(self, id):\n post = None\n posts = self.session.query(Post).filter(Post.id == id).all()\n if len(posts) > 0:\n post = posts[0]\n return post", "def get_pk(self):\n return getattr(self, self.get_pk_name(), None)", "def get_thread(self):\n return Comment.objects.filter(path__startswith=self.get_root_path())", "def _get_instance(self):", "def _get_instance(self):", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def find_instance(cls, identifier):\r\n for instance in cls.all:\r\n if instance.identifier == identifier:\r\n return instance\r\n return None", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)" ]
[ "0.65143263", "0.64079964", "0.6296819", "0.6184585", "0.59477484", "0.5869962", "0.5766771", "0.57316494", "0.5705596", "0.56982195", "0.5665292", "0.56357944", "0.56308913", "0.55791014", "0.55728555", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.5515035", "0.55004305", "0.54911715", "0.54792845", "0.5461303", "0.5427392", "0.54091847", "0.5396441", "0.5390748", "0.5385592", "0.5309908", "0.52984655", "0.5296277", "0.5290748", "0.5289184", "0.5283557", "0.52450836", "0.5226058", "0.52249473", "0.5216897", "0.51990527", "0.5186936", "0.51865846", "0.5183096", "0.5164829", "0.51608676", "0.5151187", "0.5147107", "0.5140432", "0.5135604", "0.5135604", "0.5132462", "0.51309013", "0.51308346", "0.51279956", "0.51259315", "0.51234967", "0.51133996", "0.51113045", "0.51100713", "0.51096386", "0.5104133", "0.5104133", "0.51022613", "0.51022613", "0.51022613", "0.51022613", "0.50998765", "0.50923234" ]
document_score: 0.60853547
document_rank: 4
query: categories is an ordered dictionary
def __init__(self, position, area, categories, align=TOPLEFT, step=None, edge=DEFAULT_EDGE, colour=light_grey, deselect=True, button_labels=None): self.categories = categories if step is None: self.step = Button.default_height else: self.step = step self.deselect = deselect super().__init__(position, area, self.step * len(categories), align=align, edge=edge, button_size=self.step, colour=colour) self.button_tags = {} if button_labels is None: self.button_labels = {} else: self.button_labels = button_labels self.buttons = self.gen_buttons(categories) self.all_buttons = flatten(self.buttons) self.visible_buttons = list(self.buttons.keys()) self.update_display()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def categories(self):\n\t\treturn (sorted(self.dictData.keys()))", "def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t \"Recommended\": 15\n }\n return cat", "def list(self):\n return list(sorted(self.manager.data[\"category\"].keys()))", "def categories(self):\n pass", "def serialize_categories(md: Metadata) -> list:\n categories = []\n all_cat = md.categories.all()\n for cat in all_cat:\n category = OrderedDict()\n\n category[\"id\"] = cat.id\n category[\"type\"] = cat.type\n category[\"title_EN\"] = cat.title_EN\n category[\"description_EN\"] = cat.description_EN\n category[\"title_locale_1\"] = cat.title_locale_1\n category[\"description_locale_1\"] = cat.description_locale_1\n category[\"title_locale_2\"] = cat.title_locale_2\n category[\"description_locale_2\"] = cat.description_locale_2\n category[\"symbol\"] = cat.symbol\n category[\"online_link\"] = cat.online_link\n\n categories.append(category)\n\n return categories", "def setCategories(self, categories):\n vocabulary = dict(self.getCategoryVocabulary())\n self.categories = OrderedDict()\n for catId in categories:\n name = vocabulary.get(catId, None)\n if name is not None:\n self.categories[catId] = vocabulary[catId]\n else:\n # Sliently ignore that category id, it doesn't have a matching category name.\n # I apologize if you found this comment after hours of digging around code. \n pass", "def get_categories(self) -> tuple:\n return self.categories", "def getCategories(self):\n return self.categories.keys()", "def get_categories(mapping):\n categories = []\n \n for idx, name in mapping.items(): \n temp = {'id':idx, 'name':name, 'supercategory':'NA'}\n categories.append(temp)\n \n return categories", "def list_categories(self) -> List[Tuple[str, str]]:\n category_list = [(name, path) for name, path in self.category_map.items()]\n # Fix the order of category list.\n category_list.sort(key=lambda category: category[0])\n return category_list", "def get_categories_group(self):\n m = {}\n for post in self:\n for cat in post.Categories:\n if cat not in m:\n m[cat] = []\n m[cat].append(post)\n return m", "def categories_parser(categories):\n categories_parsed = {}\n for category in categories:\n id = category[\"id\"]\n label = category[\"name\"]\n categories_parsed[id] = label\n return categories_parsed", "def categories(self):\n\t\treturn self._categories", "def getCategories(self):\r\n return self.categories", "def categories(self):\n return { category: subcategories.keys() for category, subcategories in self.lib.items()}", "def get_categories():\n categories = app.preprocessed.uniq_categs\n result = {\n 'success': True,\n 'data': {\n 'categories': categories\n }\n }\n return jsonify(result)", "def categories(self):\n return self._data[\"categories\"]", "def _get_all_categories() -> dict:\n categories = database.fetchall(\"Categories\", \"id\", \"name_ua\")\n return categories", "def print_categories_list():\n\n categories = []\n for item in data:\n cat = item[\"category\"]\n\n if cat not in categories:\n categories.append(cat)\n\n print(categories) # print the list", "def category_names(self):\n return list(self.categories.keys())", "def category_list():\n categories = Category.objects.filter(active=True)\n return 
{'categories': categories}", "def sorted_categories(self):\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l", "def inject_categories():\n return {'categories': entities.Listing.CATEGORIES,\n 'categories_dict': entities.Listing.CATEGORIES_DICT}", "def categories(self):\n game_categories = self.game_categories.all()\n return [ gc.category for gc in game_categories ]", "def categories(self):\n return self.__categories", "def getCategory():", "def categories(self):\n cur = self.con.execute('select category from cc');\n return [d[0] for d in cur]", "def get_categories_enumerated_key_map(self):\n return dict(enumerate([c.name for c in self.categories]))", "def Categories(self):\r\n return self._categories", "def add_categories(self, categories):\n categories_id = COCOTools.get_categories_id(self.coco[\"categories\"])\n cat_name = list(categories_id.keys())\n cat_id = list(categories_id.values())\n max_id = 0\n if cat_id:\n max_id = max(cat_id)\n for item in categories:\n name = item[\"name\"]\n id = item[\"id\"]\n if name in cat_name:\n continue\n max_id += 1\n item[\"id\"] = max_id # BUG:will assign to categories\n self.coco['categories'].append(item)", "def fill_category(self):\n cursor = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n categories = dict()\n result = requests.get('https://fr.openfoodfacts.org/categories.json').json()\n for element in result['tags']:\n try:\n cursor.execute(\"INSERT INTO category (tag, name, url) VALUES (%s, %s, %s) RETURNING id, tag\",\n (element[\"id\"], element[\"name\"], element[\"url\"]))\n query_result = cursor.fetchone()\n categories.__setitem__(query_result[1], query_result[0])\n except self.conn.OperationalError:\n print(\"operation Error\")\n except self.conn.DataError:\n print(\"Data Error\")\n self.conn.commit()\n cursor.close()\n return categories", "def test_get_categories(self):\n pass", "def update_category_map(self, category_list):\n for category in category_list:\n skip = False\n for i in [\"wiki\", \"sources\", \"article\", \"stub\",\n \"wayback\", \"cs1\"]:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()", "def categories(self):\n return self._categories", "def test_categories_are_sorted(self):\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])", "def categories():\n\tcategories = [\n\t\t'News',\n\t\t'Technology',\n\t\t'Music',\n\t\t'Sports'\n\t]\n\tresponse = { 'response': categories }\n\treturn jsonify(response)", "def get_categories():\n try:\n result = {\n \"success\": True,\n \"categories\": get_all_categories()\n }\n return jsonify(result)\n\n except Exception as exp:\n abort(exp.code)", "def find_categories_used_dict(request):\n categories_used = []\n\n for item_index, item in enumerate(all_shopping_items(request)):\n category_dict = {\n 'category': item.category.category,\n }\n if item_index == 0:\n categories_used.append(category_dict)\n else:\n add_category = True\n\n for list_item in categories_used:\n \n if list_item['category'] == item.category.category:\n add_category = False\n \n if add_category:\n categories_used.append(category_dict)\n\n return categories_used", "def Categories(self, default=[None]):\n return self.data.get('categories', default)", "def add_category(self, category: str) -> None:\n for letter in self.data:\n 
if not self.data[letter].get(category):\n self.data[letter][category] = []\n print(f'Categoria: {category} adicionada ao dicionário.')\n self.save()\n self.beautify_json()", "def list_categories(self):\n raise NotImplementedError()", "def encode(category_main : ):", "def _categories(self, txt):\n\n # It is slightly faster like this because we are nto creating\n # a lambda obj each time.\n def first_part(s):\n return s.split(']]', 1)[0].split('|')[0]\n\n return map(first_part, txt.split(\"[[Category:\")[1:]) + \\\n [\"wikibase-article\"]", "def test_get_categories_from_json():\n allocator = RecipeAllocator()\n allocator.load_data(\n orders_dir=\"tests/orders.json\", recipes_dir=\"tests/recipes.json\"\n )\n allocator.get_categories_from_json()\n assert list(allocator.portion_categories_dict.keys()) == [\n \"two_portions\",\n \"four_portions\",\n ] and list(allocator.recipe_categories_dict.keys()) == [\n \"two_recipes\",\n \"three_recipes\",\n \"four_recipes\",\n ]", "def categories(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"categories\")", "def getCategories(self):\n categories = set()\n for article in self.articles.values():\n categories.add(article.category)\n return categories", "def parseCategories(self, unparsedCateg):\n category_attributes = ('CategoryID', 'CategoryName', 'CategoryLevel', 'BestOfferEnabled', 'CategoryParentID')\n categories = []\n for categoryChild in unparsedCateg:\n attributes = {}\n for category in categoryChild:\n if self.getXmlTagname(category.tag) in category_attributes:\n attributes[self.getXmlTagname(category.tag)] = category.text\n categories.append((attributes['CategoryID'],\n attributes['CategoryName'],\n attributes['CategoryLevel'],\n attributes['CategoryParentID'],\n 1 if 'BestOfferEnabled' in attributes else 0, )) # Ensure correct order.\n return categories", "def get_kwargs(d):\n return {\"categories\": d.get(\"categories\", None)}", "def get_categories(self):\n cats = []\n for post in self:\n cats.extend(post.Categories)\n return list(sorted(set(cats)))", "def add_categories(categories, business):\n\tfor category in business[CATEGORIES]:\n\t\tcategories.add(category)", "def Subcategories():\n subcat = {\n \t\"Featured\": 0,\n \t\"All\": 1,\n \t\"Collectibles\": 2,\n \t\"Clothing\": 3,\n \t\"BodyParts\": 4,\n \t\"Gear\": 5,\n \t\"Models\": 6,\n \t\"Plugins\": 7,\n \t\"Decals\": 8,\n \t\"Hats\": 9,\n \t\"Faces\": 10,\n \t\"Packages\": 11,\n \t\"Shirts\": 12,\n \t\"Tshirts\": 13,\n \t\"Pants\": 14,\n \t\"Heads\": 15,\n \t\"Audio\": 16,\n \t\"RobloxCreated\": 17,\n \t\"Meshes\": 18,\n \t\"Accessories\": 19,\n \t\"HairAccessories\": 20,\n \t\"FaceAccessories\": 21,\n \t\"NeckAccessories\": 22,\n \t\"ShoulderAccessories\": 23,\n \t\"FrontAccessories\": 24,\n \t\"BackAccessories\": 25,\n \t\"WaistAccessories\": 26,\n \t\"AvatarAnimations\": 27,\n \t\"ClimbAnimations\": 28,\n \t\"FallAnimations\": 30,\n \t\"IdleAnimations\": 31,\n\t \"JumpAnimations\": 32,\n\t \"RunAnimations\": 33,\n \t\"SwimAnimations\": 34,\n \t\"WalkAnimations\": 35,\n \t\"AnimationPackage\": 36,\n \t\"Bundles\": 37,\n \t\"AnimationBundles\": 38,\n\t \"EmoteAnimations\": 39,\n\t \"CommunityCreations\": 40,\n\t \"Video\": 41,\n\t \"Recommended\": 51\n }\n return subcat", "def getCategories(self):\n logger.debug(\"Func: getCategories\")\n\n return self._categories", "def update_categories(self):\n categories = {}\n datasets = self.data['dataset']\n used_categories = self._get_list_categories_used(datasets)\n for category in used_categories:\n categories.update({\n 
category: self._get_datasets_tasks_by_category(datasets, category)\n })\n self.data[\"category\"] = categories", "def categories(data):\n if data:\n for i in data:\n category = CategoriesModel(categories=i['categories'])\n category.save()", "def categories(self) -> List[Category]:\n return list(set(self.mapping.values()))", "def get_categories():\n return session.query(Category)", "def wash_categories(product: dict):\n\n i = 0\n while i <= len(product['categories_tags']) - 1:\n if ':' in product['categories_tags'][i]:\n product['categories_tags'][i] = \\\n (product['categories_tags'][i].split(':'))[1]\n i += 1\n\n product['categories'] = product['categories'].split(',')\n i = 0\n while i <= len(product['categories']) - 1:\n if ':' in product['categories'][i]:\n product['categories'][i] = \\\n (product['categories'][i].split(':'))[1]\n i += 1", "def get_categories():\n\n url = 'https://fr.openfoodfacts.org/categories.json'\n data = requests.get(url).json()\n with open('data/categories.json', 'w') as file:\n file.write(json.dumps(data, indent=4))", "def get_category_data_off(self):\n list_categories_name=[]\n cat = requests.get('https://fr.openfoodfacts.org/categories?json=true')\n cat_data = cat.json()\n tags_list = cat_data['tags']\n print (len(tags_list))\n list_of_random_tags_list = random.sample(tags_list, k=self.view.num_to_select)\n\n for category in list_of_random_tags_list:\n try :\n category_name = category['name']\n print(category_name)\n list_categories_name.append(category_name)\n print (list_categories_name)\n self.list_categories = list_categories_name # list_categories_name is passed in the instance property\n except KeyError:\n pass\n except UnicodeEncodeError:\n pass", "def getAllCategories(self):\n return self.categories", "def categories(self):\n return self.env.categories", "def get_categories(self, categories):\r\n category, created = Categories.objects.get_or_create(name=categories)\r\n category.save()", "def get_list(self):\n categories = []\n for attribut in self.attributes:\n attr = getattr(self, attribut, False)\n if attr is True:\n categories.append(attribut)\n if getattr(self, 'education') is True:\n categories.append(_(u'education'))\n if getattr(self, 'training') is True:\n categories.append(_(u'training'))\n if getattr(self, 'tutoring') is True:\n categories.append(_(u'tutoring'))\n\n return categories", "def categories(root='.'):\n cats_dict = dict()\n for dirpath, filename, file, date in traverse_posts(root):\n # extract title and categories from frontmatter\n with open('{}/{}'.format(dirpath, filename), 'r') as f:\n fm = frontmatter.loads(f.read())\n title, cat = fm['title'], fm['categories'].replace(' ', '/')\n file = {'date': date, 'file': file, 'title': title}\n for base_cat in parse_base_cats(cat):\n if not base_cat in cats_dict:\n cats_dict[base_cat] = []\n if cat in cats_dict:\n cats_dict[cat].append(file)\n else:\n cats_dict[cat] = [file]\n\n return OrderedDict(sorted(cats_dict.items(), key=lambda x: x[0]))", "def get_category(self, obj):\n cat_lst = []\n for k, v in obj.items():\n cat_lst = cat_lst + list(v.keys())\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n cat_lst = cat_lst + list(in_v.keys())\n in_k, in_v = list(in_v.items())[-1]\n simpl_lst = [i for n, i in enumerate(cat_lst) if i not in cat_lst[:n]]\n res = []\n for cat in simpl_lst:\n if cat not in self._loop_name:\n re_outer = re.compile(r'([^A-Z ])([A-Z])')\n re_inner = re.compile(r'(?<!^)([A-Z])([^A-Z])')\n res.append(re_outer.sub(r'\\1 \\2', re_inner.sub(r' 
\\1\\2', cat)))\n self._category = res", "def subcategories(self):\r\n return [self.decode_string(x) for x in self.extra.get('Subcategories', []) if x]", "def get_categories():\n bu = 'http://www.watchonlinemovies.com.pk'\n r = requests.get(bu, headers=mozhdr)\n if r.url != bu:\n bu = r.url\n items = {'ARecently Uploaded Movies': bu,\n 'B2018 Movies': bu + 'category/indian-movies/2018-full-movies/',\n 'C2018 English Movies': bu + 'category/hollywood-movies/2018-movies-hollywood/',\n 'D[COLOR yellow]** Search **[/COLOR]': bu + '?s=',\n 'Z[COLOR red]Note: This addon is no longer supported, please install WatchOnlineMovies-New from ReasonsRepository [/COLOR]': 'book'}\n \n return items", "def test_get_categories(self):\n obs = self.tester._get_categories(self.conn_handler)\n self.assertEqual(obs, self.exp_categories)", "def test_get_categories(self):\n obs = self.tester._get_categories(self.conn_handler)\n self.assertEqual(obs, self.exp_categories)", "def _loadCategories(self):\n logger.debug(\"Func: _loadCategories\")\n\n if os.path.isfile(self._pathsDict[\"categoriesFile\"]):\n categoriesData = self._loadJson(self._pathsDict[\"categoriesFile\"])\n if categoriesData == -2:\n return -2\n else:\n categoriesData = self._sceneManagerDefaults[\"defaultCategories\"]\n # categoriesData = [\"Model\", \"Shading\", \"Rig\", \"Layout\", \"Animation\", \"Render\", \"Other\"]\n self._dumpJson(categoriesData, self._pathsDict[\"categoriesFile\"])\n return categoriesData", "def cat_parser(self, response):\n categories = []\n for product in response:\n # crawling categories of each product\n prod_cat = product[\"categories\"].split(\", \")\n for cat in prod_cat:\n if cat not in categories:\n categories.append(cat)\n return categories", "def _get_categories(self, *args):\n raise NotImplementedError(self, \"_get_categories\")", "def get_categories():\n categories = [c.serialize for c in session.query(Category).all()]\n for c in range(len(categories)):\n items = [\n i.serialize for i in session.query(Item).filter_by(\n category_id=categories[c][\"id\"]).all()]\n if items:\n categories[c][\"Item\"] = items\n return jsonify(Category=categories)", "def get_categories():\n categories = Category.query.all()\n formatted_categories = {}\n for category in categories:\n formatted_categories[category.id] = category.type\n\n return jsonify({\n 'success': True,\n 'categories': formatted_categories,\n 'total_categories': len(categories)\n })", "def get_categories():\n categories = Category.query.order_by(Category.id).all()\n categories = {category.id: category.name for category in categories}\n\n response = jsonify({\"success\": True, \"categories\": categories})\n\n return response", "def get_categories(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get payees from database\n cur.execute(\"SELECT * FROM categories\")\n cats_data = cur.fetchall()\n\n # convert into a list of payee dictionaries\n cats_list = []\n [cats_list.append({'category_id': cat[0],\n 'parent_id': cat[1],\n 'category_name': cat[2]})\n for cat in cats_data]\n\n # close the cursor\n self.close_cursor()\n\n return cats_list", "def insert_categories(self):\n logic = CategoryLogic()\n \n # We create the list of category objects\n categories = self.objects_factory.create_category_object_list()\n categories = set(categories)\n \n for category in categories:\n logic.insert(category)", "def get_used():\r\n sql = text('''\r\n SELECT category.* FROM category, app\r\n WHERE app.category_id=category.id GROUP BY category.id\r\n ''')\r\n results = 
db.engine.execute(sql)\r\n categories = []\r\n for row in results:\r\n category = dict(id=row.id, name=row.name, short_name=row.short_name,\r\n description=row.description)\r\n categories.append(category)\r\n return categories", "def get_categories():\n item_type = \"categories\"\n info_dict = spotify.categories()\n items = info_dict[item_type][\"items\"]\n categories = []\n for i in range(len(items)):\n category_name = items[i][\"name\"]\n category_id = items[i][\"id\"]\n categories.append({\"Category Name\": category_name,\n \"Category ID\": category_id\n })\n return categories", "def site_to_category():\n return {\"UNEW\": 1, \"USFD\": 2, \"CAU\": 3, \"TASMC\": 4, \"RBMF\": 5}", "def group_categories(categories_file):\n # map each category id to its name\n id_to_category = {}\n for category in categories_file['categories']:\n id_to_category[category['id']] = category['name']\n\n image_categories = {}\n for category in categories_file['annotations']:\n if category['image_id'] not in image_categories:\n image_categories[category['image_id']] = []\n if id_to_category[category['category_id']] not in image_categories[category['image_id']]:\n image_categories[category['image_id']].append(id_to_category[category['category_id']])\n return image_categories", "def parse_categories(categories, categories_list, reports):\n remove_list = [\"overview\", \"configuration\", \"use_cases\", \"content\"]\n for distinct_category in categories:\n report_categories = []\n category = {}\n for report_doc in reports:\n if distinct_category in report_doc[\"report-categories\"]:\n category[\"category\"] = distinct_category\n category[\"description\"] = \"\"\n for x in remove_list:\n if x in report_doc:\n del report_doc[x]\n report_categories.append(report_doc)\n category[\"reports\"] = report_categories\n categories_list.append(category)", "def avail_categories(self):\n # retrieve categories\n categories = self.show_all_categories()\n # for each category, retrieve packages\n output = {}\n for category in categories:\n packages = self.show_category(category)\n output[category] = packages\n\n return output", "def get_ordered_verb_categories(self, taxonomy, **kwargs):\n return [verb_cat for verb_cat in taxonomy.verb_categories.all().order_by('level')]", "def _get_categories(cats):\n if \",\" in cats:\n return tuple([c.lower().strip() for c in cats.split(\",\")])\n else:\n return (cats.lower().strip(), )", "def _traverse_categories():\n \n csv_output_dict = _get_csv_data()\n print csv_output_dict.keys()\n #~ if csv_output_dict:\n #~ return csv_output_dict\n #~ else:\n final_output_url_list = []\n traverse_cat_output = {}\n # brand_seed_page = 'https://www.broderbros.com/cgi-bin/online/webshr/browse-brand-all.w'\n seed_page = 'https://www.broderbros.com/cgi-bin/online/webshr/browse-category-all.w'\n \n browser = utils.create_browser(SLEEP_MIN, SLEEP_MAX)\n browser.open(seed_page)\n html_response = browser.response().read()\n \n soup = BeautifulSoup(html_response)\n \n all_category_names = soup.findAll('div', {'class': 'brwsBrndLogoClip'})\n for category_names in all_category_names:\n temp_category_url = str(category_names.a['href'])\n category_url_name = temp_category_url.split('&catname=')[-1]\n category_url = 'https://www.broderbros.com/cgi-bin/online/webshr/search-result.w?nResults=5000&mc=&RequestAction=advisor&RequestData=CA_CategoryExpand&bpath=c&CatPath=All Products////BRO-Categories////'+str(category_url_name)\n category_url = category_url.replace(' ', '%20')\n category_names = 
category_names.a.img['alt'].lower()\n returned_url_list = _get_product_urls(category_url, category_names)\n final_output_url_list.extend(returned_url_list)\n \n for productsurl in final_output_url_list:\n if productsurl['id'] in traverse_cat_output.keys() and productsurl['category_list'] != traverse_cat_output[productsurl['id']]['category_list']:\n traverse_cat_output[productsurl['id']]['category_list'].extend(productsurl['category_list'])\n else:\n traverse_cat_output[productsurl['id']] = productsurl\n \n brand_output_categories = _traverse_categories_brand()\n for brand_url in brand_output_categories:\n if brand_url['id'] in traverse_cat_output.keys():\n traverse_cat_output[brand_url['id']]['brand'] = brand_url['brand']\n else:\n traverse_cat_output['id'] = brand_url\n \n if TESTRUN:\n for k,v in traverse_cat_output.items():\n print v\n print '*'*78\n for item_id in traverse_cat_output.keys():\n print item_id\n csv_data = csv_output_dict.get(item_id)\n if csv_data:\n traverse_cat_output[item_id]['brand'] = csv_data['brand']\n if not traverse_cat_output[item_id].get('brand'):\n del traverse_cat_output[item_id]\n \n return traverse_cat_output", "def report_categories():\n return list(sorted(set([rt.category for rt in report_types()])))", "def all_categories_handler():\n categories = getAllCategories()\n return jsonify(categories=[i.serialize for i in categories])", "def getCategories(URIList, annotatedWords):\n \n L=[]\n wordByCategory=dict()\n i=0\n for URI in URIList:\n sparql = SPARQLWrapper(\"http://dbpedia.org/sparql\")\n sparql.setQuery(\"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX dc: <http://purl.org/dc/terms/>\n SELECT ?label\n WHERE { \"\"\"+ \"<\"+ URI + \"> dc:subject ?label }\"\n )\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n for result in results[\"results\"][\"bindings\"]:\n category=result[\"label\"][\"value\"].encode(\"UTF-8\").split(\"/\")[-1].replace(\"_\",\" \").replace(\"Category:\",\"\")\n L.append(category)\n if category in wordByCategory:\n if i>= len(annotatedWords):\n print \"getCategories is computing URI=\",URI\n print \"Trying to append element number\",i,\n print \"from a list having\",len(annotatedWords),\"elements.\"\n wordByCategory[category].append(annotatedWords[i])\n else:\n wordByCategory[category]=[annotatedWords[i]]\n i+=1\n return L, wordByCategory", "def category():\n kwargs = {k: parse(v) for k, v in request.args.to_dict().items()}\n return jsonify(objects=get_categories(**kwargs))", "def get_categories_info():\n\n categories_info = RedisSession().session.get('categories_info')\n\n if not categories_info:\n categories_info = Category.objects().only('name', 'locale')\n RedisSession().session.set('category_info', categories_info.to_json())\n return categories_info", "def get_selected_categories_and_codes(self):\n\n self.codes, self.categories = self.app.get_codes_categories()\n # Extra keys for hierarchy charts\n for code in self.codes:\n code['count'] = 0\n code['parentname'] = \"\"\n for cat in self.categories:\n cat['count'] = 0\n cat['parentname'] = \"\"\n\n node = self.ui.comboBox_category.currentText()\n if node == \"\":\n return\n for category in self.categories:\n if category['name'] == node:\n node = category\n node['supercatid'] = None\n break\n \"\"\" Create a list of this category (node) and all its category children.\n Note, maximum depth of 100. 
\"\"\"\n selected_categories = [node]\n i = 0 # Ensure an exit from loop\n new_model_changed = True\n while self.categories != [] and new_model_changed and i < 100:\n new_model_changed = False\n append_list = []\n for n in selected_categories:\n for m in self.categories:\n if m['supercatid'] == n['catid']:\n append_list.append(m)\n for n in append_list:\n selected_categories.append(n)\n self.categories.remove(n)\n new_model_changed = True\n i += 1\n self.categories = selected_categories\n # Remove codes that are not associated with these categories\n selected_codes = []\n for cat in self.categories:\n for code in self.codes:\n if code['catid'] == cat['catid']:\n selected_codes.append(code)\n self.codes = selected_codes", "def post_process_cif_category(cif, category_name):\n if not cif[category_name]: # nothing in the category => should be removed\n cif.pop(category_name)\n return\n\n for k, v in cif[category_name].items():\n if isinstance(v, list):\n if len(v) == 1:\n cif[category_name][k] = v[0]\n\n if not v:\n cif.pop(category_name)\n return", "def get_all_categories():\n return jsonify({\n \"success\": True,\n \"categories\": _read_all_categories()\n })", "def category_names(self):\n return self._category_names", "def subcategories(self, ssub_cat, user_cat):\n select_sub_cat = {}\n min_sub_cat = []\n print(\"POUR LA CATEGORIE\", user_cat, \":\")\n for sub in ssub_cat:\n select_sub_cat.update({sub.id: sub.name})\n for sub_cat in sorted(select_sub_cat.items(), key=lambda t: t[0]):\n print(sub_cat[0], sub_cat[1].replace('_', ' '))\n min_sub_cat.append(sub_cat[0])\n return min_sub_cat", "def _categorize_questions(questions):\n # type: (Dict) -> Dict[str, List[str]]\n questions_by_category_tag = defaultdict(list) # type: Dict[str, List[str]]\n for question_name, question_class in questions.items():\n template_dict = question_class.template\n tags = template_dict.get(\"instance\").get(\"tags\")\n category_tag = _categorize_question(question_name, tags)\n questions_by_category_tag[category_tag].append(question_name)\n return questions_by_category_tag", "def categories(self, categories):\n self._categories = categories", "def Categories(self, new_categories):\r\n if not isinstance(new_categories, ListType):\r\n raise TypeError(\"The supplied categories must be a list of \"\r\n \"strings.\")\r\n for new_cat in new_categories:\r\n if not isinstance(new_cat, str):\r\n raise TypeError(\"Invalid category: not of type 'string'\")\r\n elif new_cat not in self._metadata_map.CategoryNames:\r\n raise ValueError(\"The category '%s' is not in the mapping \"\r\n \"file.\" % new_cat)\r\n\r\n if not self._suppress_numeric_category_check:\r\n if not self._metadata_map.isNumericCategory(new_cat):\r\n raise TypeError(\"The category '%s' is not numeric. Not \"\r\n \"all values could be converted to numbers.\"\r\n % new_cat)\r\n\r\n if not self._suppress_category_uniqueness_check:\r\n if self._metadata_map.hasUniqueCategoryValues(new_cat):\r\n raise ValueError(\"All values in category '%s' are unique. \"\r\n \"This statistical method cannot operate \"\r\n \"on a category with unique values (e.g. \"\r\n \"there are no 'within' distances because \"\r\n \"each group of samples contains only a \"\r\n \"single sample).\" % new_cat)\r\n\r\n if not self._suppress_single_category_value_check:\r\n if self._metadata_map.hasSingleCategoryValue(new_cat):\r\n raise ValueError(\"All values in category '%s' are the \"\r\n \"same. 
This statistical method cannot \"\r\n \"operate on a category that creates only \"\r\n \"a single group of samples (e.g. there \"\r\n \"are no 'between' distances because \"\r\n \"there is only a single group).\"\r\n % new_cat)\r\n\r\n self._categories = new_categories", "def test_unused_categories_logic(self):\n s = ak.array([str(i) for i in range(10)])\n s12 = s[1:3]\n cat = ak.Categorical(s)\n cat12 = cat[1:3]\n self.assertListEqual(ak.in1d(s, s12).to_list(), ak.in1d(cat, cat12).to_list())\n self.assertSetEqual(set(ak.unique(s12).to_list()), set(ak.unique(cat12).to_list()))\n\n cat_from_codes = ak.Categorical.from_codes(ak.array([1, 2]), s)\n self.assertListEqual(ak.in1d(s, s12).to_list(), ak.in1d(cat, cat_from_codes).to_list())\n self.assertSetEqual(\n set(ak.unique(s12).to_list()),\n set(ak.unique(cat_from_codes).to_list()),\n )", "def sort_dict_by_category_and_pos_left(dict_collection: {str: []}):\n list_sorted_by_cate_and_pos = []\n for kind in (CATEGORIES_UNMOVABLE + CATEGORIES_TREE + CATEGORIES_MOVABLE):\n if dict_collection.get(kind) is not None:\n list_sorted_by_pos = []\n for item_or_group in dict_collection[kind]:\n list_sorted_by_pos.append(item_or_group)\n list_sorted_by_cate_and_pos += sorted(list_sorted_by_pos, key=lambda x: x.left)\n\n return list_sorted_by_cate_and_pos" ]
[ "0.7909571", "0.7163796", "0.7039748", "0.7027904", "0.7026439", "0.6947721", "0.68675554", "0.684357", "0.68159634", "0.6771762", "0.6768741", "0.67397535", "0.6650347", "0.6645734", "0.6628202", "0.6627645", "0.662422", "0.66078293", "0.6583735", "0.64916193", "0.6485819", "0.6484338", "0.64348084", "0.64057636", "0.64033735", "0.64031553", "0.6400484", "0.63704973", "0.63356864", "0.63272864", "0.63210344", "0.6316405", "0.63113034", "0.62876964", "0.62684476", "0.6250978", "0.6245072", "0.62441254", "0.62396765", "0.6235645", "0.62150604", "0.621364", "0.6198314", "0.618874", "0.6185302", "0.6185163", "0.61820006", "0.6136935", "0.61211276", "0.6112003", "0.6110027", "0.6107979", "0.6095798", "0.6093231", "0.60836977", "0.60798424", "0.60624486", "0.603994", "0.603866", "0.6032753", "0.6025087", "0.60249156", "0.6019809", "0.60176885", "0.6008534", "0.6006353", "0.5999698", "0.59849036", "0.59849036", "0.5984362", "0.59840995", "0.5980097", "0.597943", "0.59725404", "0.5971715", "0.59712315", "0.5967657", "0.5957838", "0.59520066", "0.59442365", "0.5936347", "0.59328324", "0.5932367", "0.5920698", "0.5918186", "0.591509", "0.591179", "0.5908333", "0.58992743", "0.5895059", "0.58861965", "0.58792025", "0.5877064", "0.58686036", "0.58613086", "0.5858735", "0.58564436", "0.5837261", "0.58365387", "0.5833643", "0.5826093" ]
0.0
-1
Provide the text contents to the rest of the class.
def setUp(self): self.message = "notification message"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_text(self):\n pass", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n\n self.text = text", "def text(self) -> str:", "def __init__(self):\n self.text = ''", "def get_text(self):", "def getText(self):", "def contents(self, text):\n self.app.contents = text", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def save_text(self):\n content = self.get_content()\n if content != '':\n self.text.append((content, self.context, self.ancestor))", "def addText(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def init_text(self):\n d = self.declaration\n if d.text:\n self.set_text(d.text)\n if d.text_color:\n self.set_text_color(d.text_color)\n if d.text_alignment:\n self.set_text_alignment(d.text_alignment)\n if d.font_family or d.text_size:\n self.refresh_font()\n if hasattr(d, 'max_lines') and d.max_lines:\n self.set_max_lines(d.max_lines)", "def text(self, text=None):\n if text is None:\n return self._text\n else:\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def text(self, text):\n\n self._text = text", "def __init__(self):\n self.content = \"\"", "def get_text(self) -> str:\n return self.text", "def textContent(self, text: str) -> None: # type: ignore\n if self._inner_element:\n self._inner_element.textContent = text\n else:\n # Need a trick to call property of super-class\n super().textContent = text # type: ignore", "def text(self) -> str:\n return self.__text", "def obtain_text():\n pass", "def __str__(self):\n\t\treturn self.text", "def text(self):\n return self.content", "def run(self, text):\r\n pass", "async def text(self, ctx):\n pass", "def set_text(self, text):\n\n self.text = text", "def set_text(self, T):\n self.text = T", "def addContent(text):", "def WriteText(self, text):\n print(text)", "def __init__(self,\n text: str) -> None:\n\n super().__init__(text)", "def get_text(self):\n return self.text", "def text(self):\n # type: () -> str\n return self._text", "def write(self, text):\n self.text = text", "def text(self, x, y, txt, cls=None, style=None, tags=None):\n x, y = self._meta.units(x, y)\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n tag_str = (' '.join('%s=\"%s\"' % (k, v) for k, v in tags.iteritems()) + ' ') if tags else ''\n self.elements.append(\"\"\"\n <text x=\"%s\" y=\"%s\" %s%s%s>%s</text>\n \"\"\".strip() % (\n x, y, cls_str, style_str, tag_str, txt\n ))\n return self", "def __repr__(self):\n return self.text", "def __repr__(self):\n return self.text", "def print_text(self, text1, text2):", "def text(self) -> None:\n label_space = tk.Label(self)\n label_space.grid(row=0)\n label_book_number = tk.Label(self, text=f'Номер книги:')\n label_book_number.grid(row=1, column=0, ipady=5)\n label_title = tk.Label(self, text='Название книги:')\n label_title.grid(row=2, column=0, padx=5)\n label_author = tk.Label(self, text='Автор:')\n label_author.grid(row=3, column=0, pady=5)\n label_genre = tk.Label(self, text='Жанр:')\n label_genre.grid(row=4, column=0)", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def __str__(self):\n return self.text", 
"def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def text(self):\n return self.__text", "def small_text(self):\n pass", "def Add_Text( self, th ):\r\n self.text_handle = th", "def inner_text(self, text) -> None:\n logging.info(f\"inner text. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.innerText=\"{text}\";\"\"\"\n self._execute_javascript(js)", "def __init__(self, as_text=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.as_text = as_text", "def setText(*args):", "def get_plain_text(self):\n raise NotImplementedError(\"get_plain_text is not implemented\")", "def data(self, text):\n if self._keep_text:\n self._text.append(text)", "def show(self):\n self.set_text(self.read())", "def content(self):\n raise NotImplementedError()", "def __init__(self):\n self.title_text = ''\n self.meta_text = ''\n self.url_text = ''\n self.heading_text = ['','','','','','']\n self.body_text = ''", "def setText(self, text=\"\"):\n self._text = text\n self._text_item.setHtml(self._compile_text())", "def __repr__(self) -> str:\n return f\"{self.text}\"", "def text(self) -> str:\n return self._text", "def add_text(self, text):\n self.text = self.text + text", "def add_text(self, text):\n text_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/text.html')\n text_output = text_template.render(text=text)\n self.contents.append(text_output)", "def text(self) -> str:\n return self._impl.get_text()", "def Print(self, text):\n pass", "def record_text(self, **kw):\n return self._text(self._record_template, **kw)", "def text(self, x, y, txt, cls=None, style=None, tags=None, linespacing=10, valign=1.0):\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n tag_str = (' '.join('%s=\"%s\"' % (k, v) for k, v in tags.items()) + ' ') if tags else ''\n lines = txt.split(\"\\n\")\n hh = linespacing * len(lines)\n y -= (hh - linespacing) * valign\n for line in lines:\n xx, yy = self._meta.units(x, y)\n self.elements.append(\"\"\"\n <text x=\"%s\" y=\"%s\" %s%s%s>%s</text>\n \"\"\".strip() % (\n xx, yy, cls_str, style_str, tag_str, line\n ))\n y += linespacing\n return self", "def get_text(self) -> str:\n return self._text", "def get_text(self, course): # pylint: disable=unused-argument\r\n raise NotImplementedError", "def run(self, name, rawtext, text, lineno, inliner, options=None,\n content=None):\n raise NotImplementedError", "def large_text(self):\n pass", "def text(self):\n return ''", "def set_text(self, text):\n self.set_text_f(\"%s\", text)", "def __init__(self, name=\"\", value=\"\"):\n super().__init__(\"text\", name)\n self.value = value", "def getText(self):\r\n return \"\"", "def Text(self):\n return self._text", "def getText(self):\n\t\treturn self.bsource.get_text() # \"no value for 'self' in unbound method call\" pylint error. Still runs. Idk. 
", "def __init__(\n self,\n type,\n text):\n self.type = type\n self.text = text", "def SetText(self, text):\r\n\r\n self._text = text", "def __init__(self, content):\n self._content = content.strip()", "def showcontents(self):\n # See ToolTip for an example\n raise NotImplementedError", "def text(self):\n return self._text", "def text(self):\n return self._text", "def text(self):\n return self._text", "def text(self):\n return self._text", "def text(self):\n return self._text", "def text(self):\n return self._text", "def GetText(self):\r\n \r\n return self._text", "def SetText(self, text):\r\n\r\n self._text = text\r\n return self", "def populate_contents(self):\n raise Exception('Implement me!')", "def get_text(self):\n return self.get_property('text')", "def set_text(self, new_text):\n\n self.output['text'] = new_text", "def __call__(self, string):\n return Text(string, self)", "def set_text(self):\n\n if not self.text and len(self.get_files()) > 0:\n self.text = self.files[0].get_title()\n # if \"_\" in str(self.text):\n if re.match(\"[0-9]_[0-9]\", self.text) is not None:\n self.text = self.files[0].get_parent()[\"title\"]\n else:\n try: \n int(self.text)\n # is a simple int\n if int(self.text) > 20:\n self.text = self.files[0].get_parent()[\"title\"]\n except Exception as e:\n # not a simple int\n # do nothing cause probably set already\n pass\n self.text = self.text.replace(\"_\", \" \")\n self.set_keywords()", "def text(self):\n return \"\\n\".join(self.raw_text)", "def printText(self, text):\n self._append_plain_text(text)" ]
[ "0.7451575", "0.738682", "0.738682", "0.7353386", "0.7223362", "0.7178723", "0.7166255", "0.7111192", "0.7060302", "0.6893968", "0.6893968", "0.6893968", "0.6893968", "0.6893968", "0.6882488", "0.68126476", "0.6810179", "0.6803394", "0.6770941", "0.6770941", "0.6770941", "0.6770941", "0.6770941", "0.6770941", "0.6731761", "0.6691539", "0.66586465", "0.6650329", "0.6604534", "0.6592466", "0.65734273", "0.6564183", "0.65583456", "0.6552527", "0.6546128", "0.6540027", "0.65396595", "0.6536414", "0.65267324", "0.6519999", "0.6512635", "0.65094316", "0.65020543", "0.65020543", "0.64953107", "0.6493583", "0.64644974", "0.64619875", "0.64619875", "0.64619875", "0.64619875", "0.64619875", "0.6460858", "0.6460002", "0.64484614", "0.6447509", "0.64361817", "0.6427389", "0.6403392", "0.6392742", "0.6391519", "0.63788354", "0.6374807", "0.63723445", "0.63691103", "0.63657147", "0.6364474", "0.6364397", "0.6346791", "0.6334833", "0.6332842", "0.6330888", "0.6330702", "0.6253987", "0.624598", "0.6245544", "0.6236235", "0.62178665", "0.61993647", "0.6198411", "0.6197368", "0.6195539", "0.6181399", "0.6163329", "0.6152657", "0.61443573", "0.61435676", "0.61435676", "0.61435676", "0.61435676", "0.61435676", "0.61435676", "0.61399543", "0.61383617", "0.61340725", "0.6126805", "0.6115722", "0.6112449", "0.6107748", "0.61075455", "0.6104738" ]
0.0
-1
Test that exceptions are caught.
def test_invalid_webhook(self, mock_send): logging.disable(logging.CRITICAL) # Don't log to stderr during this unit test mock_send.side_effect = OSError("Some error") send_notification("invalid_webhook", self.message) mock_send.assert_called() logging.disable(logging.NOTSET) # Reset the logging
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_exception_handling(self) -> None:\n try:\n 1 / 0\n assert False, \"should have thrown a ZeroDivisionError\"\n except (ZeroDivisionError, TypeError, NameError) as err:\n assert type(err) is ZeroDivisionError\n\n # From within tests, use `pytest.raises` to write assertions about raised exceptions\n with pytest.raises(ZeroDivisionError) as err:\n 1 / 0\n assert isinstance(err.value, ZeroDivisionError)", "def test_exceptions_on_the_test(self):\n try:\n raise Exception(\"faki faki faki\")\n except Exception as e:\n #TODO: write a proper test to verify exception logging\n res = output(e, \"ERROR\")\n eq_(type(\"\"),type(res))\n eq_(\"GREAP ERROR faki faki faki\",res)\n eq_(True,mymock.called)", "def test_print_exception() -> None:\n try:\n raise ValueError(\"foo\")\n except Exception as ex:\n print_exception(ex, \"Message\")", "def test_exception(self) -> None:\n raise Exception(self.text)", "def testRaisesException(self):\n\t\tx = BaseAction('x')\n\t\tx.throws = Exception()\n\t\tself.failUnlessRaises(Exception, x.playback)", "def test_exceptions():\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.connect(path=r\"No process with this please\")\r\n assert False\r\n except application.ProcessNotFoundError:\r\n print('ProcessNotFoundError has been raised. OK.')\r\n\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.start(cmd_line = r\"No process with this please\")\r\n assert False\r\n except application.AppStartError:\r\n print('AppStartError has been raised. OK.')", "def test_raise_():\n with pytest.raises(Exception):\n raise_(ValueError)", "def test_are_chained_exceptions_printed(self):\n\n io = BufferedSystemIO()\n\n try:\n try:\n raise IndexError('Invalid index 5')\n except IndexError as index_exc:\n raise Exception('There was an error with index') from index_exc\n\n except Exception as exc:\n output_formatted_exception(exc, ':my-test-task', io)\n\n self.assertIn('(Caused by) IndexError:', io.get_value())\n self.assertIn('Exception:', io.get_value())\n self.assertIn('There was an error with index', io.get_value())", "def test_exception_in_exception_handler(exception_app):\n request, response = exception_app.test_client.get(\n '/error_in_error_handler_handler')\n assert response.status == 500\n assert response.body == b'An error occurred while handling an error'", "def unexpectedException(self):", "def check_exception(action, exception, message):\n\n try:\n action()\n assert 0, \"Expected exception\"\n except exception as e:\n assert e.args[0] == message", "def test_exception():\n class Handler(RequestHandler):\n def get(self):\n self.set_status(400)\n self.write('Fail')\n\n app = Application([url('/hello', Handler)])\n\n with Tester(app) as tester:\n for i in range(5):\n try:\n yield tester.http_client.fetch(\n tester.url_for('/hello'))\n except HTTPError as e:\n assert 400 == e.code\n else:\n assert False", "def test_exception_does_not_catch_its_parent():\n with pytest.raises(EnvironmentError):\n try:\n raise EnvironmentError\n except AppNotWorking:\n pass", "def test_handle_exception(self, mock_http, mock_tb, mock_time,\n mock_excepthook):\n\n mock_traceback = Mock()\n exc_type = TypeError\n message = 'oh snap'\n exception = exc_type(message)\n\n mock_tb.extract_tb.return_value = [\n ('foo.py', 42, 'foo', 'return bar()'),\n ('bar.py', 101, 'bar', 'return baz(x)'),\n ('baz.py', 34, 'baz', 'return x + 1')\n ]\n mock_tb.format_exc.return_value = 'this 
is a traceback'\n now = time.time()\n mock_time.time.return_value = now\n\n kaput._handle_exception(exc_type, exception, mock_traceback)\n\n mock_tb.extract_tb.assert_called_once_with(mock_traceback)\n mock_tb.format_exc.assert_called_once_with()\n mock_http.request.assert_called_once_with(\n 'https://kaput-dev.appspot.com/api/v1/exception',\n method='POST',\n headers={'kaput-api-key': self.api_key,\n 'Content-Type': 'application/json'},\n body=json.dumps({\n 'project_id': self.project_id,\n 'timestamp': now,\n 'exception': exc_type.__name__,\n 'message': message,\n 'frames': mock_tb.extract_tb.return_value,\n 'stacktrace': mock_tb.format_exc.return_value\n })\n )\n mock_excepthook.assert_called_once_with(\n exc_type, exception, mock_traceback)", "def report_unexpected_exception(self, *args, **kwargs):\n pass", "def test_handled_unhandled_exception(exception_app):\n request, response = exception_app.test_client.get('/divide_by_zero')\n assert response.status == 500\n soup = BeautifulSoup(response.body, 'html.parser')\n assert soup.h1.text == 'Internal Server Error'\n\n message = \" \".join(soup.p.text.split())\n assert message == (\n \"The server encountered an internal error and \"\n \"cannot complete your request.\")", "def assert_console_raises(self, exception, **kwargs):\r\n with pytest.raises(exception):\r\n self.execute_console_task(**kwargs)", "def test_http_error(self):\n self.assertEqual(-1, self.__uft.failed_tests('raise'))\n self.assertEqual(-1, self.__uft.passed_tests('raise'))\n self.assertEqual(-1, self.__uft.skipped_tests('raise'))", "def assertRaises(test_case, fn, exception_class):\n\n exc = None\n try:\n fn()\n except Exception as e:\n exc = e\n test_case.assertTrue(exc is not None)\n test_case.assertTrue(isinstance(exc, exception_class))", "def assertion_errored(self, func, exception):", "def test_exception_result(self):\n raiser = self.make_wrapped_function()\n self.assertRaises(ZeroDivisionError, raiser, ZeroDivisionError())", "def test_exception_message(self, db_mock: Mock) -> None:\n\n try:\n raise Exception(\"test\") # pylint: disable=broad-exception-raised\n except Exception as exc: # pylint: disable=broad-exception-caught\n self.plugin.add(\"exception test\", exc)\n self.plugin.pull()\n\n self.assertEqual(\n db_mock.call_args_list[0][0][0][0][1][1],\n \"test\"\n )", "def test_base_exception(self) -> None:\n with pytest.raises(BaseException) as e:\n 1 / 0\n assert isinstance(e.value, ZeroDivisionError)", "def test_reraise_no_trigger() -> None:\n with pytest.raises(RuntimeError):\n with reraise(RuntimeError(\"got exception\")):\n raise ValueError(\"boom\")", "def test_exception_execution(self):\r\n a_thread = workerthread.WorkerThread(exception_queue=self.exception_queue,\r\n return_queue=self.message_queue,\r\n target=self.sample_exception_function, args=(1, 2))\r\n a_thread.start()\r\n a_thread.join()\r\n exc_type, exc = self.exception_queue.get()\r\n self.assertTrue(isinstance(exc, Exception))", "def test_exceptions_init_nonexistent():\n with pytest.raises(IOError):\n Exceptions(os.path.join(os.path.dirname(__file__),\n 'nonexistent_exceptions.yaml'))", "def test_check_for_errors(self):\n downloader = _MultithreadedDownloader(mock.Mock(), mock.Mock(), 5)\n\n request = mock.Mock()\n exception = ValueError(\"failed\")\n\n successful_future = mock.Mock(exception=mock.Mock(return_value=None))\n failed_future = mock.Mock(exception=mock.Mock(return_value=exception))\n completed_futures = (\n ([successful_future] * 2) + [failed_future] + [successful_future]\n )\n\n with 
pytest.raises(exception.__class__):\n downloader._check_for_errors(request, completed_futures)", "def exception(self):\n raise Exception(\"Exception test\")", "def exception_test(func, exception, exp_msg, *args, **kw):\n with pytest.raises(exception) as err:\n func(*args, **kw)\n\n assert exp_msg in str(err.value)", "def raises(*exceptions):\r\n statistics.assertions += 1\r\n error = Error()\r\n try:\r\n yield error\r\n except exceptions, e:\r\n error.exc = e\r\n else:\r\n exceptions = exceptions[0] if len(exceptions) == 1 else exceptions\r\n raise AssertionError(\"didn't raise %s when expected\" % _repr(exceptions))", "def assert_console_raises(self, exception, **kwargs):\n with self.assertRaises(exception):\n self.execute_console_task(**kwargs)", "def test_err():\n nt.assert_raises(ValueError,trapz,range(2),range(3))", "async def test_datasets_access_call_exception(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = ConnectionException()\n with self.assertRaises(aiohttp.web_exceptions.HTTPInternalServerError):\n await fetch_datasets_access(pool, None)", "async def test_exception(players, strats, when, _):\n game = gamegen.samplegame(players, strats)\n sched = gamesched.samplegamesched(game)\n esched = utils.ExceptionScheduler(sched, 10, when)\n sgame = schedgame.schedgame(esched)\n rests = np.concatenate(\n [game.random_restrictions(3), np.ones((1, game.num_strats), bool)]\n )\n with pytest.raises(utils.SchedulerException):\n await asyncio.gather(*[sgame.get_restricted_game(rest) for rest in rests])", "def test_fail(f, msg='', contains=''):\n try:\n f()\n assert False,f\"Expected exception but none raised. {msg}\"\n except Exception as e: assert not contains or contains in str(e)", "def test_exception(self):\n\n sink = TObserver(immediate_continue=0)\n self.obs.observe(init_observer_info(sink))\n ack1 = self.left.on_next_list([select_completed])\n\n self.right.on_error(self.exception)\n\n self.assertIsInstance(self.measure_state(self.obs), ControlledZipStates.Stopped)\n self.assertEqual(self.exception, sink.exception)", "def test_exception_case(file_with_exception_value):\n with pytest.raises(ValueError, match=\"It is not a magic number!\"):\n read_magic_number(file_with_exception_value)", "def test_exception_raise(self):\n with self.assertRaises(Exception):\n IperfClientCommandBuilder().build_client_command()", "def unexpected_error(self, exception):", "def test_raise_with_proper_code_and_args(self):\n with self.assertRaises(CloudantFeedException) as cm:\n raise CloudantFeedException(101)\n self.assertEqual(cm.exception.status_code, 101)", "def test_task_finder_exception(test_operator, task_name, exception):\n with pytest.raises(exception):\n test_operator.find_task(task_name)", "def test_fails(self):\n raise FoolishError(\"I am a broken test\")", "def testExceptionRaisedByFunctions(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g.h(3, 4)\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\tself.failUnlessRaises(Exception, x.g.h, 3, 4)", "def test_blink_exception(self):\n test_exception = BlinkException([1, \"No good\"])\n self.assertEqual(test_exception.errid, 1)\n self.assertEqual(test_exception.message, \"No good\")", "def test_excessive_Sigops(self):\n logging.info(\"Entered : test_excessive_Sigops \\n\")\n try:\n testExcessiveSigops(self)\n except (Exception, JSONRPCException) as e1:\n logging.info(e1)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n raise 
TestAssertionError({\"file_name\": fname, \"line_num\": exc_tb.tb_lineno, \\\n \"error_type\": exc_type.__name__, \"error_msg\": str( e1 ), \\\n \"n1\" : \"N/A\", \"n2\" : \"N/A\", \"amount\" : \"N/A\", \"numsig\" : \"N/A\"})", "def test_reraise_no_trigger_base() -> None:\n with pytest.raises(BaseException):\n with reraise(RuntimeError(\"got exception\")):\n raise BaseException(\"boom\")", "def test_start_tasks_db_raises():\n with pytest.raises(ValueError) as excinfo:\n tasks.start_tasks_db('some/great/path', 'mysql')\n exception_msg = excinfo.value.args[0]\n assert exception_msg == \"db_type must be a 'tiny' or 'mongo'\"", "def test_errback(self):\n def callable():\n raise TestException()\n\n clock = task.Clock()\n d = task.deferLater(clock, 1, callable)\n clock.advance(1)\n return self.assertFailure(d, TestException)", "def test_add_raises_catch():\n with pytest.raises(TypeError):\n tasks.add(task='not a Task object')", "def test_raise_exception2(self):\n with self.assertRaises(Exception):\n SshCommandBuilder(SERVER_USER, COMMAND)\\\n .set_ip_address(SERVER_IP)\\\n .set_hostname(SERVER_HOST)\\\n .to_build()", "def exception_tester(self, key, value, exc, msg):\n for val in [value, (value, 3)]:\n with self.assertRaises(exc) as e:\n self.dstore[key] = val\n self.assertEqual(e.exception.args[0], msg)", "def test_referral_not_found_exception():\n # When raising the exception\n with pytest.raises(ReferralNotFoundError) as e_info:\n raise ReferralNotFoundError(\"Custom exception message\")\n\n # Then the exception message should be as expected\n assert str(e_info.value) == \"Custom exception message\"", "def test_tolerate_dumb_signature(self, exception_class):\n\n try:\n i_live_but_why = exception_class(616)\n except Exception as exc:\n pytest.fail(str(exc))\n\n assert isinstance(i_live_but_why, exception_class)", "def assertion_failed(self, func, exception):", "def test_raise_using_invalid_code(self):\n with self.assertRaises(CloudantFeedException) as cm:\n raise CloudantFeedException('foo')\n self.assertEqual(cm.exception.status_code, 100)", "def test_uncaught_wsgi_exception(self):\n\n def app(environ, start_response):\n raise Exception(\"Oops\")\n\n event = {\n \"httpMethod\": \"POST\",\n \"path\": \"/\",\n \"queryStringParameters\": {\n \"x\": \"y\"\n },\n \"headers\": {\n \"Host\": \"localhost\",\n \"Content-Type\": \"text/plain\",\n \"Content-Length\": \"2\"\n },\n \"body\": \"Hi\"\n }\n context = DummyContext()\n\n with self.assertRaisesRegexp(Exception, \"Oops\"):\n result = Handler(app)(event, context)\n\n # TODO: Test exc_info is logged somewhere", "def test_normal_use(self):\n # Setup:\n class DatabaseError(Exception):\n pass\n\n # Python 2 and 3:\n from future.utils import raise_from\n\n class FileDatabase:\n def __init__(self, filename):\n try:\n self.file = open(filename)\n except IOError as exc:\n raise_from(DatabaseError('failed to open'), exc)\n\n # Testing the above:\n try:\n fd = FileDatabase('non_existent_file.txt')\n except Exception as e:\n assert isinstance(e.__cause__, IOError) # FileNotFoundError on\n # Py3.3+ inherits from IOError", "def test_raise_without_code(self):\n with self.assertRaises(CloudantFeedException) as cm:\n raise CloudantFeedException()\n self.assertEqual(cm.exception.status_code, 100)", "def test_class_errored(self, cls, exception):", "def _test_exception_in_worker_impl(self, pool, num_to_ventilate):\n # exception should be propagated to calling thread\n pool.start(ExceptionGeneratingWorker_5)\n for i in range(num_to_ventilate):\n 
pool.ventilate(\"Datanum_%d\" % i)\n with self.assertRaises(ValueError):\n pool.get_results()", "def test_ImportError(n=2):\n\n p = bad_import()\n\n try:\n p.result()\n except ImportError:\n print(\"Caught ImportError\")\n else:\n assert False, \"Raise the wrong Error\"", "def assert_raises(Exc, call, *arg, **kw):\n exc = None\n try:\n call(*arg, **kw)\n except (SystemExit, Exception), exc: # SystemExit isn't an Exception?!\n pass\n assert exc is not None, \"no exception; expected %s\" % Exc\n assert isinstance(exc, Exc), \"raised %s, not %s\" % (repr(exc), repr(Exc))\n return exc", "def test_exception_raise2(self):\n with self.assertRaises(Exception):\n IperfClientCommandBuilder()\\\n .set_server_ip(SERVER_IP)\\\n .set_server_hostname(SERVER_HOST)\\\n .build_client_command()", "def test():\n\ttry:\n\t\tprint \"Raising ParseErrror.\"\n\t\traise ParseError\n\texcept ParseError:\n\t\tprint \"Caught ParseError.\"", "def test_exception(self):\n self.assertRaises(TypeError, lambda: self.init_model())", "def test_raises_exceptions(recwarn, code):\n res = DummyResource()\n res.set_exception_type(code)\n try:\n res.raise_exception()\n except exceptions.WandException as e:\n assert not e.__class__.__name__.endswith('Warning')\n assert str(e) == 'Dummy exception'\n else:\n w = recwarn.pop()\n assert w.category.__name__.endswith('Warning')\n assert \"Dummy exception\" in str(w.message)\n assert recwarn.list == []", "def raises(f, e):\n try:\n f()\n except e:\n return True\n except Exception as e: # Any other exceptions are errors.\n return False\n return False", "def test_execute_monitor_exception():\n message = FakeMessage()\n message.raw_payload = json.dumps(TestData.JOB_MONITOR_GZIP_PAYLOAD)\n with aioresponses() as mocked:\n mocked.get(\n TestData.JOB_MONITOR_URL, status=400, body=\"Bad Request in Monitor Call\"\n )\n with pytest.raises(Exception) as excinfo:\n worker.execute(message, TestData.RECEPTOR_CONFIG, queue.Queue())\n assert \"Bad Request in Monitor Call\" in str(excinfo.value)", "def test_exception_chaining(self) -> None:\n\n def f():\n raise IOError\n\n try:\n try:\n f()\n except IOError as ioe:\n # Create a new exception, chaining it to the caught exception\n raise RuntimeError(\"some i/o operation failed\") from ioe\n except RuntimeError as r:\n pass", "def raise_exception(request):\n raise Exception(\"Let's test error handling\")", "def test_exception(self):\n with self.assertRaises(Exception):\n ps.pairs([1, 1, 1, 2], 5)", "def test_doesNotSkipOnDifferentError(self):\n\n @doNotFailOnNetworkError\n def inner():\n self.assertEqual(\"Error!!!!\", \"\")\n\n try:\n inner()\n except Exception as e:\n self.assertIsInstance(e, FailTest)", "def test_reraise_single_trigger() -> None:\n with pytest.raises(TypeError):\n with reraise(RuntimeError(\"got value error\"), when=ValueError):\n raise TypeError(\"boom\")", "def test_raise_exception2(self):\n with self.assertRaises(Exception):\n SshpassBaseCommandBuilder(COMMAND)\\\n .set_password(SERVER_PASSWORD)\\\n .set_file(PASSWORD_FILE)\\\n .to_build()", "def test_passed_raise(self, mock_url_read):\n mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)\n self.assertEqual(-1, self.__report.nr_warnings(['raise'], 'high'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27raise%27')", "async def test_two_child_crashes():\n async def crasher(etype):\n raise etype\n\n with pytest.raises(MultiError) as excinfo:\n async with Nursery() as 
nursery:\n nursery.start_soon(crasher(KeyError))\n nursery.start_soon(crasher(ValueError))\n\n assert set(type(exc)\n for exc in excinfo.value.exceptions) == {ValueError, KeyError}", "def test_raise_exception(self):\n with self.assertRaises(Exception):\n SshpassBaseCommandBuilder(COMMAND).to_build()", "def test_raise_exception(self):\n with self.assertRaises(Exception):\n SshCommandBuilder(SERVER_USER, COMMAND).to_build()", "def test_attempt_exception(capsys):\n message = 'some message'\n exception = 'run-time-error'\n\n with pytest.raises(SystemExit):\n with attempt(message):\n raise RuntimeError(exception)\n\n captured = capsys.readouterr()\n assert captured.out == 'Info: {} [FAILED]\\n'.format(message)\n assert captured.err == 'Critical: {}\\n'.format(exception)", "def test_handle_raise_value_error(self) -> None:\n with pytest.raises(ValueError) as excinfo:\n FileLookup.handle(\"foo\")\n assert (\n str(excinfo.value) == \"Query 'foo' doesn't match regex: \"\n \"^(?P<codec>[base64|json|json-parameterized|parameterized|\"\n \"parameterized-b64|plain|yaml|yaml-parameterized]:.+$)\"\n )", "def test_scenarios_that_should_raise_errors(self, kwargs, auth):\n try:\n auth.load_creds(**kwargs)\n # raises ValueError (zero length field name in format) for python 2.6\n # OSError for the rest\n except (OSError, ValueError):\n pass\n except Exception as e:\n pytest.fail(\"Unexpected exception thrown: %s\" % e)\n else:\n pytest.fail(\"OSError exception not thrown.\")", "def test_attempt_exception_traceback(capsys):\n message = 'some message'\n exception = 'run-time-error'\n\n with pytest.raises(SystemExit):\n with attempt(message, include_traceback=True):\n raise RuntimeError(exception)\n\n captured = capsys.readouterr()\n assert captured.out == 'Info: {} [FAILED]\\n'.format(message)\n assert captured.err.startswith('Critical: {}\\n'.format(exception))\n assert 'Traceback' in captured.err", "def test_reraise_multiple_triggers() -> None:\n with pytest.raises(TypeError):\n with reraise(RuntimeError(\"got value error\"), when=(ValueError, KeyError)):\n raise TypeError(\"boom\")", "def test_exceptions_init_valid():\n exceptions = Exceptions(os.path.join(os.path.dirname(__file__),\n 'valid_exceptions.yaml'))\n assert exceptions.exceptions", "def test_error_handler_exception(tmpdir):\n logger = logging.getLogger('shapely.geos')\n logfile = str(tmpdir.join('test_error.log'))\n fh = logging.FileHandler(logfile)\n logger.addHandler(fh)\n\n # This calls error_handler with a format string of \"%s\" and one\n # value.\n with pytest.raises((ReadingError, pygeos.GEOSException)):\n loads('POINT (LOLWUT)')\n\n log = open(logfile).read()\n assert \"Expected number but encountered word: 'LOLWUT'\" in log", "def get_exception():\n raise Exception(\"example\")", "def test_commandRaisesUnhandledException(self):\n\n class UnhandledException(Exception):\n \"\"\"\n An unhandled exception.\n \"\"\"\n\n self.assertCommandExceptionResponse(\n UnhandledException(\"unhandled\"),\n b\"001\", b\"BAD Server error: unhandled\\r\\n\",\n )\n\n self.assertTrue(self.flushLoggedErrors(UnhandledException))", "def test_sum_integer_should_raise_exception(self):\n\n with self.assertRaises(TypeError):\n sum(1)", "def test_init_exception(self):\n with self.assertRaises(ZeroDivisionError): Fraction(10, 0)", "def check_raises(func, exception_type, msg=None):\n # type: (Callable[[], Any], Type[Exception], Optional[Str]) -> Exception\n msg = \": {}\".format(msg) if msg else \".\"\n try:\n func()\n except Exception as exc: # pylint: disable=W0703\n 
msg = \"Wrong exception [{!s}] raised instead of [{!s}]{}\" \\\n .format(type(exc).__name__, exception_type.__name__, msg)\n assert isinstance(exc, exception_type), msg\n return exc\n raise AssertionError(\"Exception [{!s}] was not raised{}\".format(exception_type.__name__, msg))", "def test_segfault(self, *args, **kw):\n import aio\n\n self.assertRaises(IOError, aio.Queue, -1)\n self.assertRaises(IOError, aio.Queue, -0)\n self.assertRaises(IOError, aio.Queue, sys.maxint)\n\n q = aio.Queue()\n self.assertEquals(q.processEvents(), None)\n self.assertRaises(IOError, q.processEvents, minEvents = -1, maxEvents = -1, timeoutNSec = -1)\n self.assertRaises(IOError, q.scheduleRead, 0, 0, 0, 4096)\n self.assertRaises(IOError, q.scheduleRead, -1, 0, 1, 4096)", "def test_exception_class_hierarchy(self) -> None:\n\n try:\n raise CustomDerivedError(state=\"test\")\n except CustomDerivedError as cex:\n assert type(cex) is CustomDerivedError\n assert \"test\" == cex.state\n except CustomError as cex:\n assert False, \"CustomDerivedError should have caught the exception.\"\n except:\n assert False, f\"Unhandled exception: {sys.exc_info()[0]}\"", "def test_connection_fail(context_fixture):\n with pytest.raises(SystemExit):\n context_fixture('RequestException')", "def test_handle_calls(tchannel_pair):\n class _MyException(Exception):\n pass\n\n def my_handler(context, connection):\n raise _MyException()\n\n server, client = tchannel_pair\n client.ping()\n with pytest.raises(_MyException):\n server.handle_calls(my_handler)", "def test_traceback(self, logger):\n stdlib_logger = logging.getLogger(\"eliot-test2\")\n stdlib_logger.setLevel(logging.DEBUG)\n handler = EliotHandler()\n stdlib_logger.addHandler(handler)\n try:\n raise RuntimeError()\n except Exception as e:\n exception = e\n expected_traceback = traceback.format_exc()\n stdlib_logger.exception(\"ono\")\n message = logger.messages[0]\n assertContainsFields(\n self,\n message,\n {\n \"message_type\": \"eliot:stdlib\",\n \"log_level\": \"ERROR\",\n \"message\": \"ono\",\n \"logger\": \"eliot-test2\",\n },\n )\n assert_expected_traceback(\n self, logger, logger.messages[1], exception, expected_traceback\n )", "def test_150(self):\n self.assertRaises(\n exceptions.DataONEExceptionException, exceptions.deserialize,\n INVALID_ERROR_DOC[0]\n )", "def test_valid_python_raise_exception(self):\n \n data_file = testutils.DataFile(\"integration_module_valid_raise\")\n\n rtn = self.run_cmd(\"pm install --force --single module --install_name test_raise --name %s --auto\" % str(data_file))\n assert(rtn.return_code == 0)\n\n rtn = self.run_cmd(\"test_raise test\")\n\n assert(rtn.return_code == 246)", "def test_expect_err_raising(self, exc_cls: t.Type[Exception]) -> None:\n exp_exc: t.Type[Exception] = exc_cls if exc_cls else RuntimeError\n kwargs = {\"exc_cls\": exc_cls} if exc_cls else {}\n msg = \"not what I expected\"\n\n with pytest.raises(exp_exc) as exc_info:\n Ok(2).expect_err(msg, **kwargs)\n\n assert msg in str(exc_info.value)", "def test_http_error(self):\n self.assertRaises(HTTPError, lambda: self.d.artist(0).name)\n\n try:\n self.d.artist(0).name\n except HTTPError as e:\n self.assertEqual(e.status_code, 404)\n self.assertEqual('404: Resource not found.', str(e))", "def test_raises(self):\n no_replicates = 25\n try:\n replicate(experiment3, no_replicates)\n except RuntimeError as err:\n self.assertEqual(err, FAKE_ERROR)\n else:\n assert False", "def test_exception_result(self):\n myreactor = FakeReactor()\n c = EventLoop(lambda: myreactor, lambda f, g: 
None)\n c.no_setup()\n\n @c.run_in_reactor\n def raiser():\n 1 / 0\n\n result = raiser()\n self.assertIsInstance(result, EventualResult)\n self.assertRaises(ZeroDivisionError, result.wait, 0.1)" ]
[ "0.7859332", "0.7556377", "0.7296645", "0.71099347", "0.70711637", "0.6970611", "0.69576824", "0.6924376", "0.6830435", "0.6750002", "0.6712215", "0.67063254", "0.66900784", "0.6665721", "0.6664793", "0.6655494", "0.6646311", "0.66175795", "0.65986204", "0.65889573", "0.65824246", "0.6571166", "0.6568904", "0.6558685", "0.65577096", "0.6556983", "0.6555677", "0.65500337", "0.65476125", "0.652969", "0.651782", "0.6513542", "0.65077454", "0.6489736", "0.64802355", "0.6475277", "0.6474778", "0.6465442", "0.6462612", "0.6456448", "0.64521474", "0.64247465", "0.64177275", "0.64156187", "0.6405245", "0.63912773", "0.63761395", "0.6362554", "0.6361139", "0.63531137", "0.63529325", "0.6350354", "0.6341972", "0.6335704", "0.63175213", "0.63117707", "0.63058674", "0.6303945", "0.6303287", "0.6301225", "0.62982416", "0.629638", "0.6292832", "0.62922287", "0.6288767", "0.6288613", "0.62787044", "0.6277447", "0.6274533", "0.6262789", "0.62569493", "0.62557197", "0.6254095", "0.62503487", "0.6247757", "0.6247069", "0.6238206", "0.6235693", "0.6230826", "0.62281436", "0.6216081", "0.6211359", "0.6209259", "0.62090373", "0.6202429", "0.61943716", "0.6183952", "0.61830455", "0.61753064", "0.61723876", "0.61699086", "0.6167857", "0.61614805", "0.6155414", "0.6150677", "0.61470026", "0.6144677", "0.6144134", "0.6142548", "0.6138328", "0.6114996" ]
0.0
-1
Test that a valid message is sent to a valid webhook.
def test_valid_webhook(self, mock_send): send_notification("valid_webhook", self.message) mock_send.assert_called()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_simple_message(self):\n messaging = {\n 'sender': {'id': '1331235'},\n 'recipient': {'id': '1111111'},\n 'message': {'text': 'Hello world.'}\n }\n event = self.create_message_event(messaging)\n c = Client()\n response = c.post(self.webhook, data=event, content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def test_slackWH_send_good(get_slackwebhook, capsys):\n s = get_slackwebhook\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out", "def test_webhook_empty_event(self):\n event = {\n 'body': json.dumps({})\n }\n context = {}\n resp = webhook(event, context)\n self.assertEqual(resp[\"statusCode\"], 500)\n self.assertEqual(resp[\"body\"], json.dumps({}))", "def test_validation(self):\n challenge = \"challenge-string\"\n data = {\n 'hub.mode': 'subscribe',\n 'hub.verify_token': settings.VERIFY_TOKEN,\n 'hub.challenge': challenge\n }\n c = Client()\n response = c.get(self.webhook, data=data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(str(response.content, 'utf-8'), challenge)", "def test_good_signature(post_data, expected_error_message, settings, rf):\n app_key = '123appkey'\n request_signature = compute_request_signature(app_key, post_data)\n setattr(settings, APP_KEY_SETTING, app_key)\n setattr(settings, FAIL_ON_MISMATCH_SETTING, True)\n view = OurVeryOwnReceiverView.as_view()\n request = rf.post(\n WEBHOOK_URL,\n post_data,\n content_type='application/json',\n HTTP_X_GAPI_SIGNATURE=request_signature)\n\n response = view(request)\n if expected_error_message is None:\n assert response.status_code == 200\n else:\n assert response.status_code == 400\n assert response.content == expected_error_message", "def test_bot_message():\n send_json_message_to_bot(request.get_json())\n return \"ok\"", "def test_postMessage(self): #GIVEN the appropriate environment variables are configured\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n status = testBot.postMessage('Zygium') #WHEN the bot posts a message\n self.assertTrue(status == 202) # a status code of 202 should be returned", "def test_bad_signature(fail_on_mismatch, settings, rf):\n app_key = '123appkey'\n setattr(settings, APP_KEY_SETTING, app_key)\n setattr(settings, FAIL_ON_MISMATCH_SETTING, fail_on_mismatch)\n view = OurVeryOwnReceiverView.as_view()\n request = rf.post(\n WEBHOOK_URL,\n GOOD_EVENT_LIST_JSON,\n content_type='application/json')\n\n response = view(request)\n if fail_on_mismatch:\n assert response.status_code == 400\n assert response.content == ErrorMessages.INVALID_SIGNATURE\n else:\n assert response.status_code == 200", "def test_post_empty_data(self):\n response = self.app.post('/_ah/push-handlers/receive_message')\n self.assertEqual(response.status_int, 200)\n self.assertEqual(response.body, \"No request body received\")\n self.assertRaises(ValueError)", "def test_validate_post(client):\n response = client.post(\n '/user/',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n 'email': 'jeff@jeffknupp.com',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def test_post_invalid(self):\n sender = UserFactory()\n data = {\n 'senderId': sender.id,\n 'recipientId': 999,\n 'text': '...'\n }\n\n response = self.client.post(\n reverse('messages:list'),\n content_type='application/json',\n data=data,\n )\n self.assertEqual(400, response.status_code)", "async def test_mailgun_webhook_with_missing_signature(\n 
http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count", "async def test_send(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True\n\n http_message = HttpMessage(\n dialogue_reference=(\"\", \"\"),\n target=0,\n message_id=1,\n performative=HttpMessage.Performative.REQUEST,\n method=\"get\",\n url=\"/\",\n headers=\"\",\n body=\"\",\n version=\"\",\n )\n envelope = Envelope(\n to=\"addr\",\n sender=\"my_id\",\n message=http_message,\n )\n with patch.object(self.webhook_connection.logger, \"warning\") as mock_logger:\n await self.webhook_connection.send(envelope)\n await asyncio.sleep(0.01)\n mock_logger.assert_any_call(\n RegexComparator(\n \"Dropping envelope=.* as sending via the webhook is not possible!\"\n )\n )", "def test_command_trigger_webhook_post(self):\n pass", "def test_slackWH_send_badAuth(get_slackwebhook):\n s = get_slackwebhook\n s.url = 'https://hooks.slack.com/services/badAuthCreds'\n with pytest.raises(MessageSendError):\n s.send()", "def test_uptimerobot_invalid_payload_with_missing_data(self) -> None:\n self.url = self.build_webhook_url()\n payload = self.get_body(\"uptimerobot_invalid_payload_with_missing_data\")\n result = self.client_post(self.url, payload, content_type=\"application/json\")\n self.assert_json_error(result, \"Invalid payload\")\n\n expected_message = MISCONFIGURED_PAYLOAD_ERROR_MESSAGE.format(\n bot_name=self.test_user.full_name,\n support_email=FromAddress.SUPPORT,\n ).strip()\n\n msg = self.get_last_message()\n self.assertEqual(msg.content, expected_message)\n self.assertEqual(msg.recipient.type, Recipient.PERSONAL)", "def test_incorrect_token_post(self): \n request = self.build_request(token=\"incorrect_token\")\n response = self.app.post('/_ah/push-handlers/receive_message',json.dumps(request).encode('utf-8'),content_type=\"application/json\")\n self.assertEqual(response.status_int, 200)\n self.assertRaises(ValueError)", "async def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"", "def test_webhook_bad_signature(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % self.status_update.pk,\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n })\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(\n rsp.content,\n b'Invalid Travis 
CI webhook signature for status update %d.'\n % self.status_update.pk)", "def test_save_check_data(client):\n\n del proto_reminder['message']\n res = client.post('/api/reminders', json=proto_reminder)\n assert res.status_code == 400", "def post_message(webhook_url, message):\n try:\n r = requests.post(webhook_url, json=message)\n return (True, 'success')\n except Exception as e:\n return (False, 'Encountered exception:\\n' + render_exception(e))", "def test_invalid_form_message(self):\n response = self.client.post(self.get_url(self.trait.pk), {'tag': ''})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertTrue('Oops!' in str(messages[0]))", "def test_invalid_form_message(self):\n response = self.client.post(self.get_url(self.trait.pk), {'tag': '', })\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertTrue('Oops!' in str(messages[0]))", "def test_webhook_unkown_action(self):\n event = {\n \"body\": json.dumps({\n \"queryResult\": {\n \"action\": \"1manage_bmi\"\n }})\n }\n context = {}\n resp = webhook(event, context)\n self.assertEqual(resp[\"statusCode\"], 500)\n self.assertEqual(resp[\"body\"], json.dumps({}))", "def test_webhook_build_error(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % self.status_update.pk,\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n 'build_url': 'https://example.com/build',\n 'state': 'failed',\n })\n self.spy_on(TravisCIWebHookView._validate_signature,\n owner=TravisCIWebHookView,\n call_fake=lambda self, request, integration_config: True)\n\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 200)\n\n self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)\n self.assertEqual(self.status_update.url, 'https://example.com/build')\n self.assertEqual(self.status_update.state,\n StatusUpdate.DONE_FAILURE)", "async def test_mailgun_webhook_event_without_an_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"", "async def test_mailgun_webhook_event_with_correct_api_key(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == 
webhook_id_with_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"", "async def test_receive_post_ok(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True\n payload = {\"hello\": \"world\"}\n call_task = self.loop.create_task(self.call_webhook(\"test_topic\", json=payload))\n envelope = await asyncio.wait_for(self.webhook_connection.receive(), timeout=10)\n\n assert envelope\n\n message = cast(HttpMessage, envelope.message)\n dialogue = self.skill_dialogues.update(message)\n assert dialogue is not None\n assert message.method.upper() == \"POST\"\n assert message.body.decode(\"utf-8\") == json.dumps(payload)\n await call_task", "async def test_webhook_endpoint_unauthorized_update_doesnt_generate_telegram_text_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n unauthorized_update_message_text,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_text\")\n\n response = await client.post(\n TELEGRAM_WEBHOOK_URL, json=unauthorized_update_message_text\n )\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure any events would have fired\n await hass.async_block_till_done()\n\n assert len(events) == 0", "def test_webhook_no_env(self):\n payload = json.dumps({})\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(rsp.content, b'Got event without an env in config.')", "def test_standup_send_invalid_channel (url, _pre_setup):\n\n token = _pre_setup[0]['token']\n\n standup_send_data = {\n 'token': token,\n 'channel_id': 99999,\n 'message': \"message\"\n }\n\n response = requests.post(url + \"standup/send\", json=standup_send_data)\n assert response.status_code == 400", "def test_messages(client, test_db):\n login(client, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n rv = client.post(\n \"/add\",\n data=dict(title=\"<Hello>\", text=\"<strong>HTML</strong> allowed here\"),\n follow_redirects=True,\n )\n assert b\"No entries here so far\" not in rv.data\n assert b\"&lt;Hello&gt;\" in rv.data\n assert b\"<strong>HTML</strong> allowed here\" in rv.data", "def test_invalid_webhook(self, mock_send):\n logging.disable(logging.CRITICAL) # Don't log to stderr during this unit test\n mock_send.side_effect = OSError(\"Some error\")\n send_notification(\"invalid_webhook\", self.message)\n mock_send.assert_called()\n logging.disable(logging.NOTSET) # Reset the logging", "def test_send_message(self):\n\n typhoonae.websocket.send_message('1', 'My first message.')\n\n self.assertRaises(\n typhoonae.websocket.BadArgumentError,\n typhoonae.websocket.send_message, 1, 'My second message.')\n\n self.assertRaises(\n typhoonae.websocket.BadArgumentError,\n typhoonae.websocket.send_message, [None], 'My second message.')", "def test_invalid_JSON_returns_error(self):\n\n response = self.client.post(\n reverse('transcript:record_telegram'),\n content_type='application/json',\n data='''{\"something\":''')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.content, b\"Could not parse JSON\")\n self.assertEqual(Message.objects.count(), 0)", "def test_accepted_with_message(self):\n response = {\"status_code\": 202, \"content\": \"something's wrong\"}\n self.mock_response.configure_mock(**response)\n\n post_to_ext_app(\"fake_url\", \"fake_data\", \"fake_headers\")\n\n self.mock_post.assert_called_once_with(\"fake_url\", 
data=\"fake_data\", headers=\"fake_headers\")\n self.assertEqual(self.mock_send_mail.call_count, 1)", "def test_message_user():", "def test_valid_send_data(self):\n data = {'text': 'hello!',\n 'recipients': [self.contact.id]}\n form = MessageForm(data)\n self.assertTrue(form.is_valid())\n recipients = form.send()\n self.assertTrue(self.contact in recipients)\n self.assertEqual(self.outbound[0].text, data['text'])", "def testNonJSONPayload(self):\n body = 'Invalid JSON'\n headers = Headers({'Content-Length': [str(len(body))],\n 'Content-Type': ['application/json']})\n request = FakeRequest(headers=headers, body=body)\n resource = TestResource(None, None)\n result = yield resource.deferred_render_POST(request)\n response = loads(result)\n self.assertEqual(JSONRPC_PARSE_ERROR, response['error']['code'])\n message = 'Payload was not valid JSON.'\n self.assertEqual(message, response['error']['message'])\n self.assertIn(message, self.log.getvalue())\n self.assertIn('Request payload: Invalid JSON.', self.log.getvalue())", "def test_accepted_with_no_message(self):\n response = {\"status_code\": 202, \"content\": \"\"}\n self.mock_response.configure_mock(**response)\n\n post_to_ext_app(\"fake_url\", \"fake_data\", \"fake_headers\")\n\n self.mock_post.assert_called_once_with(\"fake_url\", data=\"fake_data\", headers=\"fake_headers\")\n self.assertFalse(self.mock_send_mail.called)", "def test_validate_post_existing_resource(client):\n response = client.post(\n '/user/',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n 'email': 'jknupp@gmail.com',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def test_post_error_status_code(self):\n response = self.app.post('/mq_message', data={})\n assert response.status_code == 400", "def test_webhook_bad_status_update(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % (self.status_update.pk + 1),\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n })\n self.spy_on(TravisCIWebHookView._validate_signature,\n owner=TravisCIWebHookView,\n call_fake=lambda self, request, integration_config: True)\n\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(\n rsp.content,\n b'Unable to find matching status update ID %d.'\n % (self.status_update.pk + 1))", "def test_standup_send_token_invalid(url, _pre_setup):\n\n invalid_token = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJlbWFpbCI6IiJ9.'\\\n 'xHoCwEdcs3P9KwoIge-H_GW39f1IT3kECz_AhckQGVU'\n\n standup_send_data = {\n 'token': invalid_token,\n 'channel_id': _pre_setup[2],\n 'message': \"message\"\n }\n\n response = requests.post(url + \"standup/send\", json=standup_send_data)\n assert response.status_code == 400", "def test_chat_send_message(self):\n body = SendMessageRequest()\n response = self.client.open(\n '/api/chat/send_message',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_parse_valid_request(self):\n request = self.factory.post(self.url, data=mailgun_payload)\n email = self.parser.parse(request)\n self._assertEmailParsedCorrectly(email, mailgun_payload)", "def test_contact_us_without_email(client, new_msg):\n del new_msg[\"email\"]\n rv = client.post(\"/api/send-email/\", json=new_msg)\n response = 
rv.get_json()[\"message\"]\n\n assert rv.status_code == HTTPStatus.BAD_REQUEST\n assert response[\"email\"][\"message\"] == \"Valid email is required\"", "def verify_payload():\n return True", "def test_invalid_event(bot):\n expect_error(edit, InputError, bot.username, 1, False, None, None)", "def test_contact_us_without_content(client, new_msg):\n del new_msg[\"content\"]\n rv = client.post(\"/api/send-email/\", json=new_msg)\n response = rv.get_json()[\"message\"]\n\n assert rv.status_code == HTTPStatus.BAD_REQUEST\n assert response[\"content\"][\"message\"] == \"Content of message is required\"", "def ttest_messages(self):\n self.login(\n app.config['USERNAME'],\n app.config['PASSWORD']\n )\n rv = self.app.post('/add', data=dict(\n title='<Hello>',\n text='<strong>HTML</strong> allowed here'\n ), follow_redirects=True)\n assert b'No entries here so far' not in rv.data\n assert b'&lt;Hello&gt;' in rv.data\n assert b'<strong>HTML</strong> allowed here' in rv.data", "def test_invalid_email(self):\n data = self.valid_payload\n data['email_exact'] = 'asdasd'\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_invalid_email(self):\n data = self.valid_payload\n data['email_exact'] = 'asdasd'\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_standup_send_non_member(url, _pre_setup):\n\n token_1, channel_2 = _pre_setup[0]['token'], _pre_setup[3]\n\n standup_send_data = {\n 'token': token_1,\n 'channel_id': channel_2,\n 'message': \"message\"\n }\n\n response = requests.post(url + \"standup/send\", json=standup_send_data)\n assert response.status_code == 400", "def test_message(self):\n\n message = typhoonae.websocket.Message(\n {'from': 0, 'body': 'Message body'})\n\n self.assertEqual(0, message.socket)\n self.assertEqual('Message body', message.body)", "def test_validate_patch(client):\n response = client.patch(\n '/user/1',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "async def test_webhook_platform_init(hass: HomeAssistant, webhook_platform) -> None:\n assert hass.services.has_service(DOMAIN, SERVICE_SEND_MESSAGE) is True", "def test_websocket_message(self):\n\n ws = websocket.create_connection(self.live_server_ws_url)\n ws.send('test')\n response = ws.recv()\n ws.close()\n assert 'test' == response", "async def test_webhook_endpoint_generates_telegram_text_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n update_message_text,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_text\")\n\n response = await client.post(TELEGRAM_WEBHOOK_URL, json=update_message_text)\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure event has fired\n await hass.async_block_till_done()\n\n assert len(events) == 1\n assert events[0].data[\"text\"] == update_message_text[\"message\"][\"text\"]", "def test_post_success_creates_message(self):\n sender, recipient = UserFactory(), UserFactory()\n\n data = {\n 'senderId': sender.id,\n 'recipientId': recipient.id,\n 'text': 'Hello World!',\n }\n\n response = self.client.post(\n 
reverse('messages:list'),\n content_type='application/json',\n data=data,\n )\n actual_message = Message.objects.get()\n self.assertEqual(sender.id, actual_message.sender.id)\n self.assertEqual(recipient.id, actual_message.recipient.id)\n self.assertEqual(data['text'], actual_message.text)", "def send(self):\n payload = self.format_payload()\n\n # Makes sure that the required fields are provided before\n # sending the payload.\n if not self.webhook_url:\n print ('Error: Webhook URL is required.')\n\n elif not payload:\n print ('Error: Message payload cannot be empty.')\n\n else:\n try:\n request = requests.post(self.webhook_url,\n data=json.dumps(payload),\n headers={'Content-Type': 'application/json'})\n\n request.raise_for_status()\n\n except requests.exceptions.RequestException as error:\n print('Error: %s' % error)", "def test_post_invalid_data(self):\n data = {\n 'week_day': 'd',\n 'time': 'd'\n }\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.post(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "async def test_mailgun_webhook_with_different_api_key(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=b\"random_api_key\",\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count", "def test_valid_dweet(self):\n dweet = dweepy.dweet(test_data)\n check_valid_dweet_response(self, dweet)", "def test_message_events_that_are_ignored_by_handler(\n post_message,\n create_ticket,\n close_ticket,\n get_ticket,\n ignored_subtype,\n log,\n db\n):\n slack_client = MagicMock()\n zendesk_client = MagicMock()\n workspace_uri = 'https://s.l.a.c.k'\n zendesk_uri = 'https://z.e.n.d.e.s.k'\n user_id = '100000000001'\n group_id = '200000000002'\n slack_client.users_info.return_value = {}\n payload = {\n 'channel': 'C019JUGAGTS',\n 'subtype': ignored_subtype,\n 'ts': '1597937653.011100'\n }\n is_handled = handler(\n payload,\n our_channel='C019JUGAGTS',\n workspace_uri=workspace_uri,\n zendesk_uri=zendesk_uri,\n slack_client=slack_client,\n zendesk_client=zendesk_client,\n user_id=user_id,\n group_id=group_id,\n )\n assert is_handled is False\n slack_client.users_info.assert_not_called()", "def test_message_post_fail(self):\r\n\r\n self.mock_xqueue.send_to_queue.return_value = (1, \"Not Queued\")\r\n\r\n feedback_post = {\r\n 'feedback': 'feedback text',\r\n 'submission_id': '1',\r\n 'grader_id': '1',\r\n 'score': 3\r\n }\r\n result = self.openendedmodule.message_post(feedback_post, self.test_system)\r\n self.assertFalse(result['success'])\r\n\r\n state = json.loads(self.openendedmodule.get_instance_state())\r\n self.assertNotEqual(state['child_state'], OpenEndedModule.DONE)", "def test_must_be_subbed_to_send(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n # Create Saxony as an invite-only stream.\n self.assert_json_success(\n self.common_subscribe_to_streams(user, [\"Saxony\"], invite_only=True)\n )\n\n cordelia = self.example_user(\"cordelia\")\n with 
self.assertRaises(JsonableError):\n self.send_stream_message(cordelia, \"Saxony\")", "def test_invalid_data(self, client):\n data = {\n 'username': '*' * 255,\n 'birthday': 'test'\n }\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 200\n assert 'Enter a valid date.' in str(response.content)\n assert 'Ensure this value has at most 150 characters (it has 255).' in str(response.content)", "def test_bot_triggered_event(self):\n lh = LambdaHandler(\"tests.test_bot_handler_being_triggered\")\n # from : https://docs.aws.amazon.com/lambda/latest/dg/eventsources.html#eventsources-lex\n event = {\n \"messageVersion\": \"1.0\",\n \"invocationSource\": \"DialogCodeHook\",\n \"userId\": \"user-id specified in the POST request to Amazon Lex.\",\n \"sessionAttributes\": {\n \"key1\": \"value1\",\n \"key2\": \"value2\",\n },\n \"bot\": {\"name\": \"bot-name\", \"alias\": \"bot-alias\", \"version\": \"bot-version\"},\n \"outputDialogMode\": \"Text or Voice, based on ContentType request header in runtime API request\",\n \"currentIntent\": {\n \"name\": \"intent-name\",\n \"slots\": {\n \"slot-name\": \"value\",\n \"slot-name\": \"value\",\n \"slot-name\": \"value\",\n },\n \"confirmationStatus\": \"None, Confirmed, or Denied (intent confirmation, if configured)\",\n },\n }\n\n response = lh.handler(event, None)\n\n self.assertEqual(response, \"Success\")", "def test_alice_sent(self):\n messages = list(self.alice_storage.sent)\n self.assertEqual(1, len(messages))\n self.assertIn(self.alice_message_to_bob, messages)", "def testInvalidData(self):\n data = {\n \"title\": 32,\n \"rent\": 700\n }\n\n response = self.client.post(\"/api/posts\",\n data=json.dumps(data),\n content_type=\"application/json\",\n headers=[(\"Accept\", \"application/json\")]\n )\n\n self.assertEqual(response.status_code, 422)\n\n data = json.loads(response.data)\n\n self.assertEqual(data[\"message\"], \"32 is not of type 'string'\")", "def slack_me(msg):\n # sanitise.\n msg = unicodedata.normalize('NFKD',msg).encode('ascii','ignore').decode('ascii')\n msg = re.sub('[^\\w\\s\\-.,;?!@#()\\[\\]]','', msg)\n r = requests.post(url=os.environ['SLACK_WEBHOOK'],\n headers={'Content-type': 'application/json'},\n data=f\"{{'text': '{msg}'}}\")\n if r.status_code == 200 and r.content == b'ok':\n return True\n else:\n return False", "def verify_error_message(self, response, error_message):\n self.assertEqual(response.status_code, 400)\n response = json.loads(response.content.decode('utf-8'))\n self.assertIn('error', response)\n self.assertEqual(response['error'], error_message)", "def testEmailRequired(self):\r\n res = self.app.post('/signup_process')\r\n self.assertIn('Please supply', res.body)", "def test_post_success_response(self):\n sender, recipient = UserFactory(), UserFactory()\n\n data = {\n 'senderId': sender.id,\n 'recipientId': recipient.id,\n 'text': 'Hello World!',\n }\n\n response = self.client.post(\n reverse('messages:list'),\n content_type='application/json',\n data=data,\n )\n actual_data = json.loads(response.content)\n\n self.assertEqual(201, response.status_code)\n self.assertEqual(data['senderId'], actual_data['sender']['id'])\n self.assertEqual(data['recipientId'], actual_data['recipient']['id'])\n self.assertEqual(data['text'], actual_data['text'])", "def test_submit_reason_success(self):\n data = {'other_reason': 'I dont like the color.'}\n response = self.uriel.post('/submit_reason', data, follow=True)\n self.assertRedirects(response, '/', 302)", "def 
verify_as_target(self, message_handler):", "def test_send_message_fitting(bot_arg):\n msg = 'hello world'\n send_message(msg, bot_arg, 1)\n assert bot_arg.msg_log[0] == msg", "async def test_webhook_endpoint_generates_telegram_callback_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n update_callback_query,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_callback\")\n\n response = await client.post(TELEGRAM_WEBHOOK_URL, json=update_callback_query)\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure event has fired\n await hass.async_block_till_done()\n\n assert len(events) == 1\n assert events[0].data[\"data\"] == update_callback_query[\"callback_query\"][\"data\"]", "def test_post(self):\n url = reverse('events:EventView')\n response = self.client.post(url, self.valid_payload,format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Events.objects.count(), 1)\n\n invalid_response = self.client.post(url, self.invalid_payload, format='json')\n self.assertEqual(invalid_response.status_code, status.HTTP_400_BAD_REQUEST)", "async def test_webhook_endpoint_generates_telegram_command_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n update_message_command,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_command\")\n\n response = await client.post(TELEGRAM_WEBHOOK_URL, json=update_message_command)\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure event has fired\n await hass.async_block_till_done()\n\n assert len(events) == 1\n assert events[0].data[\"command\"] == update_message_command[\"message\"][\"text\"]", "def test_post__prod(self, mock_emailmessage_constructor):\n params = {\n 'to': self.to,\n 'subject': self.subject,\n 'html': self.html,\n }\n with notifier.app.test_request_context(self.request_path, json=params):\n actual_response = self.handler.process_post_data()\n\n mock_emailmessage_constructor.assert_called_once_with(\n sender=self.sender, to=self.to, subject=self.subject,\n html=self.html)\n mock_message = mock_emailmessage_constructor.return_value\n mock_message.check_initialized.assert_called_once_with()\n mock_message.send.assert_called_once_with()\n self.assertEqual({'message': 'Done'}, actual_response)", "def test_init_with_valid_body(self):\n body = {'event_name': 'job.created'}\n message = SQSMessage(self.schema, body=body)\n\n assert isinstance(message, SQSMessage)\n assert message.body == body", "async def test_api_fire_event_with_invalid_json(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\"\"\"\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test_event_bad_data\", listener)\n\n resp = await mock_api_client.post(\n \"/api/events/test_event_bad_data\", data=json.dumps(\"not an object\")\n )\n\n await hass.async_block_till_done()\n\n assert resp.status == HTTPStatus.BAD_REQUEST\n assert len(test_value) == 0\n\n # Try now with valid but unusable JSON\n resp = await mock_api_client.post(\n \"/api/events/test_event_bad_data\", data=json.dumps([1, 2, 3])\n )\n\n await hass.async_block_till_done()\n\n assert resp.status == HTTPStatus.BAD_REQUEST\n assert len(test_value) == 0", "def test_guests_can_not_post_message(self):\n url = 
reverse('posts-list')\n data = {'title': 'some title', 'body': 'somebody :P'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_error_messages(self):\n user = self.create_user()\n user_id = user[0]\n question_id = self.create_question(int(user_id))[0]\n path = \"/api/v2/questions/{}/answers\".format(question_id)\n auth_token = user[1]\n empty_req = self.client.post(path,\n headers=dict(Authorization=\"Bearer {}\".format(auth_token)),\n data={})\n self.assertEqual(empty_req.status_code, 400)\n empty_req = self.post_data(question_id=question_id, data={\"\":\"\"})\n self.assertEqual(empty_req.status_code, 400)", "def test_create_story_invalid_form_data(self):\n res = self.client.post('/api/stories', headers={'token': user_token}, data=json.dumps({}))\n self.assertEqual(res.status_code, 400)\n result = json.loads(res.data.decode())\n self.assertEqual(result['message'], 'Required fields are missing or invalid')", "def test_falsepositive_post(client):\n g.test_authorized_for = []\n res = client.post(\"/v0/falsepositive\", json=post_json_data)\n expected_response = '{\"msg\":\"Thanks! We\\\\u2019ve marked this as a false positive\",' '\"status\":\"ok\"}'\n assert expected_response in res.data.decode(\"utf-8\")", "def process_webhook(self):\n if self.token:\n self.verify = VerificationMethod.TOKEN\n if self.secret:\n self.verify = VerificationMethod.HMAC\n return True", "def test_update_telegram_id_if_validation_fail(self):\n test_data = {'telegram_id': 'test_token'}\n url = reverse('telegram_id')\n response = self.client.put(url, json.dumps(test_data), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def validate_message(self, state_id, msg):\n pass", "def test_user_not_in_conversation(self):\n self.client.post('/conversation/{}/message'.format(self.conversation.id), {\n \"sender_id\": self.user_a.id,\n \"receiver_id\": self.user_b.id,\n \"text\": \"test message\"\n }, format='json')\n response = self.client.post('/conversation/{}/message'.format(self.conversation.id), {\n \"sender_id\": self.user_a.id,\n \"receiver_id\": self.user_c.id,\n \"text\": \"test message\"\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(Message.objects.count(), 1)", "def test_handle_invalid(self):\n # setup\n http_dialogue = self.prepare_skill_dialogue(\n dialogues=self.http_dialogues,\n messages=self.list_of_messages[:1],\n )\n incoming_message = cast(\n HttpMessage,\n self.build_incoming_message_for_skill_dialogue(\n dialogue=http_dialogue,\n performative=HttpMessage.Performative.RESPONSE,\n version=self.version,\n status_code=self.status_code,\n status_text=self.status_text,\n headers=self.headers,\n body=self.body,\n ),\n )\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.http_handler.handle(incoming_message)\n\n # after\n mock_logger.assert_any_call(\n logging.WARNING,\n f\"cannot handle http message of performative={incoming_message.performative} in dialogue={http_dialogue}.\",\n )", "def _is_message_valid(message):\n return isinstance(message, ev_envelope.Envelope)", "def test_invalid_verify_post_request(self, cred):\n resp = requests.post(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 400", "def test_init_with_invalid_body(self):\n body = {'foo': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 
\"{'event_name': 'Required'}\" in str(excinfo.value)\n\n body = {'event_name': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)\n\n body = {'event_name': 'job.'}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)", "def test_webhook_build_success(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % self.status_update.pk,\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n 'build_url': 'https://example.com/build',\n 'state': 'passed',\n })\n self.spy_on(TravisCIWebHookView._validate_signature,\n owner=TravisCIWebHookView,\n call_fake=lambda self, request, integration_config: True)\n\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 200)\n\n self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)\n self.assertEqual(self.status_update.url, 'https://example.com/build')\n self.assertEqual(self.status_update.state,\n StatusUpdate.DONE_SUCCESS)", "def test_post__staging(self, mock_emailmessage_constructor):\n params = {\n 'to': self.to,\n 'subject': self.subject,\n 'html': self.html,\n }\n with notifier.app.test_request_context(self.request_path, json=params):\n actual_response = self.handler.process_post_data()\n\n expected_to = 'cr-status-staging-emails+user+example.com@google.com'\n mock_emailmessage_constructor.assert_called_once_with(\n sender=self.sender, to=expected_to, subject=self.subject,\n html=self.html)\n mock_message = mock_emailmessage_constructor.return_value\n mock_message.check_initialized.assert_called_once_with()\n mock_message.send.assert_called_once_with()\n self.assertEqual({'message': 'Done'}, actual_response)", "def test_post_invalid_data_question(self):\n\n response = self.post_question(self.invalid_question)\n\n self.assertEqual(response.status_code, 400)", "def test_incomplete_parameters(self):\n response = self.client.post(\n reverse(\"validate_cast_member\"),\n {'name': None,\n 'role': None,},\n )\n self.assertEqual(response.status_code, 400)\n response = self.client.post(\n reverse(\"validate_cast_member\"),\n {'name': \"Pepe X\",\n 'role': None,},\n )\n self.assertEqual(response.status_code, 400)\n response = self.client.post(\n reverse(\"validate_cast_member\"),\n {'name': None,\n 'role': \"Fotografía\",},\n )\n self.assertEqual(response.status_code, 400)" ]
[ "0.73162293", "0.72650135", "0.70089185", "0.6989183", "0.6965128", "0.6857879", "0.68053627", "0.67763484", "0.6760223", "0.6692867", "0.66774166", "0.66758436", "0.665029", "0.66362166", "0.66284573", "0.65982944", "0.65941006", "0.6593365", "0.65475965", "0.6529135", "0.652329", "0.6470273", "0.64480865", "0.6419158", "0.6418783", "0.6416242", "0.6406553", "0.63935274", "0.63690674", "0.6365155", "0.6363196", "0.63503015", "0.63091314", "0.62870026", "0.62766623", "0.62324244", "0.61830854", "0.61773026", "0.6163192", "0.61494416", "0.6146117", "0.61357975", "0.61296946", "0.61261094", "0.61190546", "0.61057115", "0.6103836", "0.60985035", "0.60623795", "0.6056328", "0.6044396", "0.6037882", "0.6037882", "0.60360533", "0.60208404", "0.6019305", "0.60180193", "0.60029954", "0.5996269", "0.59888136", "0.59859073", "0.5980809", "0.5962673", "0.5955936", "0.5951602", "0.59480965", "0.5936768", "0.5924723", "0.59236366", "0.5922554", "0.59114593", "0.590937", "0.5902435", "0.58940214", "0.5891231", "0.588458", "0.588426", "0.588302", "0.58753455", "0.58749086", "0.5872867", "0.5871733", "0.5870204", "0.586874", "0.58670676", "0.58485407", "0.5847453", "0.5844934", "0.58389455", "0.58317894", "0.5809154", "0.5805482", "0.5800919", "0.57936025", "0.5786004", "0.5785493", "0.57850885", "0.57818705", "0.5779709", "0.57784814" ]
0.7538486
0
Provide a default report for the rest of the class.
def setUp(self):
    self.report = dict(title="Report 1", url="https://report1")
    self.data_model = dict(
        metrics=dict(metric_type=dict(name="type")),
        sources=dict(
            quality_time=dict(
                parameters=dict(
                    status=dict(
                        api_values={
                            "target met (green)": "target_met",
                            "near target met (yellow)": "near_target_met",
                            "target not met (red)": "target_not_met",
                            "technical debt target met (grey)": "debt_target_met",
                            "unknown (white)": "unknown",
                        }
                    )
                )
            )
        ),
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_report(self):\n raise NotImplementedError", "def report():\n pass", "def report(self, report_options=None):\n raise NotImplementedError()", "def report(self, **options):\n pass", "def init_report(self, report):\n report.text('warning', 'init_report() not implemented for this class.')", "def _report():\n return {\n 'type' : 'class',\n 'name' : 'report',\n 'base' : None,\n 'is_abstract' : False,\n 'doc' : None,\n 'properties' : [\n ('date', 'datetime', '0.1', None),\n ('evaluation', 'quality.evaluation', '1.1', None),\n ('evaluator', 'shared.responsible_party', '0.1', None),\n ('measure', 'quality.measure', '1.1', None),\n ],\n 'decodings' : [\n ('date', 'child::gmd:dateTime/gco:DateTime'),\n ('evaluation', 'self::cim:report'),\n ('evaluator', 'child::cim:evaluator'),\n ('measure', 'self::cim:report/cim:measure'),\n ]\n }", "def report(self) -> Any:", "def get_report(self):\n raise NotImplementedError('Agent is an abstract base class')", "def name(self):\n return 'Report'", "def buildReports(self):\n pass", "def generate_report(self) -> Report:\n # equity_curve = self._generate_equity_curve()\n # summary_stats = self._generate_summary_stats(equity_curve)\n # return Report(equity_curve, summary_stats)\n pass", "def reporting(self):\r\n return reporting.Reporting(self)", "def _populate_default_report(self):\n # Read the default value of the report from a file.\n with open('data/default_report.json', encoding='utf-8') as f:\n data = json.load(f)\n return data", "def report(self, output_dir):", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "def gReport(self, event):\n \n reports.createReports()", "def reporter(self):\n return NotImplementedError(\"Must implement in frontend subclass.\")", "def display_reports(self, layout): # pylint: disable=arguments-differ", "def default_report(self, print_: bool=False) -> Dict[str, Dict[str, float]]:\n\n self.check_known = True\n self.check_unknown = True\n\n report = {type_: defaultdict(float) for type_ in self.types}\n\n for type_ in self.types:\n report[type_]['precision'] = self.precision(type_)\n report[type_]['recall'] = self.recall(type_)\n report[type_]['f1_score'] = self.f1_score(type_)\n report[type_]['num'] = self.num_of_ner(type_)\n\n for type_ in self.types:\n report[type_] = dict(report[type_])\n\n if print_:\n self._print_report(report)\n\n return report", "def gen_report(self):\n self.report = '#Report for {0}\\n'.format(self.ip)\n self.report += 'This report was generated by the chameleon pentest bot. 
We cannot grant 100% accurate results.\\n'\n self.report += '###Services:\\n'\n for service in self.services:\n self.report += '#####{0}:\\n- Port: {1}\\n- Info:{2}'.format(service.name, service.port, service.info)\n self.report += '###Vulnerabilities:\\n'\n for vuln in self.vulns:\n self.report += '- {0}\\n'.format(vuln.name)\n self.report += 'Open an issue for wrong results at github.com/coretool/chameleon.'", "def report_data(self):\n return {}", "def GenerateReport(self, plugin):\n raise 'Method not implemented'", "def test_basic_usage(self):\n self._test_reports_helper({}, [\"report.txt\"])", "def initialize_reporting(self):\n reporting_params = self.reporting_params\n reporting_params[\"heartbeat_path\"] = self.result_paths[\"current_heartbeat\"]\n reporting_handler = ReportingHandler(**reporting_params)\n\n #################### Make Unified Logging Globally Available ####################\n G.log = reporting_handler.log\n G.debug = reporting_handler.debug\n G.warn = reporting_handler.warn", "def _gen_report(self):\n print \"------------------------------------------\"\n print \"fio report\"\n print \"------------------------------------------\"\n print \"name\", \" \".join(f for f in FIELDS)\n # print fields\n for name in sorted(self.reports):\n report = self.reports[name]\n #print report\n print name, \" \".join(str(report.get(f)) for f in FIELDS)\n\n print \"*******************************************\"\n # print clats\n index = 0\n for name in sorted(self.reports):\n report = self.reports[name]\n if index == 0:\n print \"clat_percent\", \" \".join(\n str(c[0]) for c in report[\"clats\"])\n print name, \" \".join(str(c[1]) for c in report[\"clats\"])\n index += 1", "def printReport(self): \n \n print('Distribution: ', self._distribution_type)\n print('Distribution Type: ', str(self._measure_type).replace('MeasureType.','')) \n print('Type Detection Match: ', str(self._measure_type_match))\n print('MLE: ', str(self._mle))\n print('Goodness of Fit: ', str(self._gof)) \n print('Goodness of Fit Pass: ', str(self._pass)) \n print('Overall Score: ', str(self._score)) \n print('-------------')", "def report(self):\r\n # Compose the list of report_column names required for \r\n # summary_report.dsw.DictWriter()\r\n sr = self.summary_report\r\n dict_leader = sr.dict_leader\r\n dict_out = sr.dict_out\r\n report_column_names = []\r\n if dict_leader is not None and dict_out is not None:\r\n for key,value in dict_leader.iteritems():\r\n #print \"Adding report_column_name(from dict_leader)=\",key\r\n report_column_names.append(key)\r\n dict_out[key] = value\r\n # We have to initialize the DictWriter with the report_column_names\r\n # below. \r\n # Also need matched coord_val and var names for calling node_report()\r\n # below,\r\n # so we do this duplication of storage of names. 
\r\n coord_var_names = []\r\n coord_val_names = []\r\n for idx, column_name in enumerate(self.column_names):\r\n var_name = \"Var_%s\" % str(idx+1)\r\n report_column_names.append(var_name)\r\n coord_var_names.append(var_name)\r\n val_name = \"Val_%s\" % str(idx+1)\r\n report_column_names.append(val_name)\r\n coord_val_names.append(val_name)\r\n # Add the entry report_column_names\r\n report_column_names += self.EntryClass.report_column_names\r\n # Instantiate dsw.DictWriter with report column names\r\n # 4 lines follow for quick test output\r\n columns_string = \"\"; sep = \"\"\r\n for i,cn in enumerate(report_column_names):\r\n columns_string += sep + cn\r\n sep = \", \"\r\n if sr.dsw_full_report is not None:\r\n # Instantiate the dict writer to write only one-row at a time,\r\n # rather than buffer the entire report in memory before\r\n # outputting, to reduce memory footprint of \r\n # large reports.\r\n # The caller assumes responsibility to sort such a large report \r\n # as needed, and to produce a view of only the 'max_bad' rows, \r\n # if needed; for example, by loading the full report\r\n # into a sql table and after it is populated by this routine, \r\n # using its facilities to sort and manipulate the report rows.\r\n dict_writer = (self.summary_report.dsw_full_report\r\n .dict_writer(report_column_names))\r\n if sr.write_header: \r\n # write the header row\r\n dict_writer.writeheader()\r\n else:\r\n dict_writer = None\r\n # Accrue output data values for a buffered report, separate from a \r\n # report that node_report may write, row by row, using dict_writer. \r\n # The output collected here may be further quickly sorted and \r\n # examined without having to reread the file that dict_writer \r\n # writes to.\r\n # Coord data output is formatted in node_report().\r\n # node_report() adds final entries column data to dict_out for \r\n # node coords and entry, and\r\n # if an entry has output, calls dict_writer to write it.\r\n is_entry, outrows = self.node_report(\r\n self.grand, \r\n dict_out=self.summary_report.dict_out, \r\n dict_writer=dict_writer,\r\n coord_var_names=coord_var_names, \r\n coord_val_names=coord_val_names)\r\n return outrows", "def __execute_reporter(self):\n if not self.__args.report:\n return\n reporter.HTMLReporter().generate_report_from_file(\n self.__lst_json_files)", "def ReportNumbers(self, default=[None]):\n return self.data.get('report_numbers', default)", "def report(self) -> computation_base.Computation:\n return self._report_fn", "def report_text(self):\n # type: () -> Optional[AnyStr]\n return f\"\\n{self.gen_report(as_dict=False)}\"", "def report(self):\n if isinstance(self.steps[-1][1], Regressor):\n return self.steps[-1][1].regression_report()\n else:\n return self.steps[-1][1].classification_report()", "def _report_name(self, cls, name):\n raise NotImplementedError('Class must implement _reportname!')", "def generate_report_for_paper(self):\n paper_report = self.metrics_calculator.report_metrics(report_type=\"paper\")\n class_numbers = sorted(self.idx2labelname_mapping.keys(), reverse=False)\n row_names = [\n f\"class_{class_num} - ({self.idx2labelname_mapping[class_num]})\"\n for class_num in class_numbers\n ]\n row_names.extend([f\"Micro-Fscore\", f\"Macro-Fscore\"])\n return paper_report, row_names", "def create_report(self, output):\n if output == 'xml':\n report = super(Report, self).create_report()\n return report\n elif output == 'csv':\n return self.statement_detail_csv()", "def print_quick_report():\r\n print('function not yet 
written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r", "def use_report(self) -> str:\n self._remove_tick()\n return self._calc_and_report()", "def reportinfo(self):\n return self.fspath, 0, f\"usecase: {self.name}\"", "def report():\n Robot.report()", "def __init__(self):\n OWSReport.__init__(self)\n self.stats['type'] = 'OGC:WCS'\n self.stats['operations']['GetCoverage'] = {}\n self.stats['operations']['GetCoverage']['hits'] = 0\n self.stats['operations']['GetCoverage']['resource'] = {}\n self.stats['operations']['GetCoverage']['resource']['param'] = 'coverage'\n self.stats['operations']['GetCoverage']['resource']['list'] = {}\n self.stats['operations']['DescribeCoverage'] = {}\n self.stats['operations']['DescribeCoverage']['hits'] = 0", "def report(self):\n for c in self._call_chain:\n print c.title\n print '=' * len(c.title)\n c.report()\n print", "def report(self, report_options=None):\n if not report_options:\n report_options = {\n \"output_format\": None,\n \"omit_keys\": None,\n }\n\n output_format = report_options.get(\"output_format\", None)\n omit_keys = report_options.get(\"omit_keys\", None)\n\n report = OrderedDict([\n (\"global_stats\", {\n \"samples_used\": self.total_samples,\n \"empty_line_count\": self._empty_line_count,\n \"file_type\": self.file_type,\n \"encoding\": self.encoding,\n \"memory_size\": self.memory_size,\n \"times\": self.times,\n }),\n (\"data_stats\", OrderedDict()),\n ])\n report[\"data_stats\"] = self._profile.profile\n return _prepare_report(report, output_format, omit_keys)", "def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... ', self.app.IGNORE_EXIT_FLAG)", "def report(self):\n print(f\"Water: {self.resources['water']}ml\")\n print(f\"Milk: {self.resources['milk']}ml\")\n print(f\"Coffee: {self.resources['coffee']}g\")", "def GenReportDump(self): # Override template method entirely and do it ourselves\n if not self.result: # Update: THIS MAY BE VALIDLY EMPTY if there are no classes in a file\n # print \"Warning, should call calc_plant_uml() after .Parse() and before str(p) - repairing...\"\n self.calc_plant_uml()\n return self.result", "def report(self, report):\n\n self._report = report", "def printreport():\n report = createreport()\n print(report[0])\n print(report[1])\n print(report[2])", "def __init__(self, client):\n super(Reports, self).__init__(client)", "def report_full(*args, **kwargs): # real signature unknown\n pass", "def report(self):\n self.report_status()\n print\n self.report_charset()\n print\n self.report_key()\n print\n self.report_keyset()", "def default_get(self, fields):\n res = super(VcsTaxReport, self).default_get(fields)\n if not fields:\n return res\n res.update({'report_type': '_SC_VAT_TAX_REPORT_',})\n return res", "def _create_report(self, report_type, report_key, report_name):\n\n listOfReports = self.model.find(xmlns + 'ListOfReports')\n \n #Check a report with the current key doesn't already exist. 
If it does, delete it\n foundReport = False\n for report in listOfReports:\n if report.attrib['key'] == report_key:\n foundReport = report\n if foundReport:\n listOfReports.remove(foundReport)\n\n #Next, look through and check to see if a report with the report_name already exists. If it does, delete it\n \n listOfReports = self.model.find(xmlns + 'ListOfReports')\n foundReport = False\n for report in listOfReports:\n if report.attrib['name'] == report_name:\n foundReport = report\n if foundReport:\n listOfReports.remove(foundReport)\n\n if report_type == 'SO':\n\n newReport = etree.SubElement(listOfReports, xmlns + 'Report')\n newReport.set('key', report_key)\n newReport.set('name', report_name)\n newReport.set('taskType', 'optimization')\n newReport.set('seperator', '&#x09;')\n newReport.set('precision', '6')\n \n newReport_Comment = etree.SubElement(newReport, xmlns + 'Comment')\n newReport_Comment_body = etree.SubElement(newReport_Comment, xmlns + 'body')\n newReport_Comment_body.set('xmlns', 'http://www.w3.org/1999/xhtml')\n newReport_Comment_body.text = 'Report automatically generated by condor-copasi'\n\n #Create the body\n newReport_Body = etree.SubElement(newReport, xmlns + 'Body')\n\n newReport_Body_Object1 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object1.set('cn','String=#----\\n')\n\n newReport_Body_Object2 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object2.set('cn','String=Evals \\= ')\n\n newReport_Body_Object3 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object3.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Function Evaluations')\n\n newReport_Body_Object4 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object4.set('cn','String=\\nTime \\= ')\n\n newReport_Body_Object5 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object5.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Timer=CPU Time')\n\n newReport_Body_Object6 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object6.set('cn','String=\\n')\n\n newReport_Body_Object7 = etree.SubElement(newReport_Body, xmlns + 'Object')\n newReport_Body_Object7.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Best Value')\n \n #And put the same objects in the footer\n newReport_Footer = etree.SubElement(newReport, xmlns + 'Footer')\n\n newReport_Footer_Object1 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object1.set('cn','String=#----\\n')\n\n newReport_Footer_Object2 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object2.set('cn','String=Evals \\= ')\n\n newReport_Footer_Object3 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object3.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Function Evaluations')\n\n newReport_Footer_Object4 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object4.set('cn','String=\\nTime \\= ')\n\n newReport_Footer_Object5 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object5.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Timer=CPU Time')\n\n newReport_Footer_Object6 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n newReport_Footer_Object6.set('cn','String=\\n')\n\n newReport_Footer_Object7 = etree.SubElement(newReport_Footer, xmlns + 'Object')\n 
newReport_Footer_Object7.set('cn','CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Best Value')\n \n elif report_type == 'SS':\n #Use the following xml string as a template\n report_string = Template(\n \"\"\"<Report xmlns=\"http://www.copasi.org/static/schema\" key=\"${report_key}\" name=\"${report_name}\" taskType=\"timeCourse\" separator=\"&#x09;\" precision=\"6\">\n <Comment>\n A table of time, variable species particle numbers, variable compartment volumes, and variable global quantity values.\n </Comment>\n <Table printTitle=\"1\">\n \n </Table>\n </Report>\"\"\"\n ).substitute(report_key=report_key, report_name=report_name)\n report = etree.XML(report_string)\n model_name = self.get_name()\n \n table = report.find(xmlns + 'Table')\n time_object = etree.SubElement(table, xmlns + 'Object')\n time_object.set('cn', 'Model=' + model_name + ',Reference=Time')\n \n for variable in self.get_variables():\n row = etree.SubElement(table, xmlns + 'Object')\n row.set('cn', variable) \n \n listOfReports.append(report)\n \n elif report_type == 'OR':\n #Use the following xml string as a template\n report_string = Template(\n \"\"\"<Report xmlns=\"http://www.copasi.org/static/schema\" key=\"${report_key}\" name=\"${report_name}\" taskType=\"optimization\" separator=\"&#x09;\" precision=\"6\">\n <Comment>\n \n </Comment>\n <Table printTitle=\"1\">\n <Object cn=\"CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Best Parameters\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Best Value\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Timer=CPU Time\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Optimization],Problem=Optimization,Reference=Function Evaluations\"/>\n </Table>\n </Report>\"\"\"\n ).substitute(report_key=report_key, report_name=report_name)\n report = etree.XML(report_string)\n \n listOfReports.append(report)\n \n elif report_type == 'PR':\n #Use the following xml string as a template\n report_string = Template(\n \"\"\"<Report xmlns=\"http://www.copasi.org/static/schema\" key=\"${report_key}\" name=\"${report_name}\" taskType=\"parameterFitting\" separator=\"&#x09;\" precision=\"6\">\n<Comment>\n Condor Copasi automatically generated report.\n </Comment>\n <Table printTitle=\"1\">\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Best Parameters\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Best Value\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Timer=CPU Time\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Function Evaluations\"/>\n </Table>\n </Report>\"\"\"\n ).substitute(report_key=report_key, report_name=report_name)\n report = etree.XML(report_string)\n \n listOfReports.append(report)\n \n \n \n \n elif report_type == 'SP':\n #Use the following xml string as a template\n report_string = Template(\n \"\"\"<Report xmlns=\"http://www.copasi.org/static/schema\" key=\"${report_key}\" name=\"${report_name}\" taskType=\"parameterFitting\" separator=\"&#x09;\" precision=\"6\">\n<Comment>\n Condor Copasi automatically generated report.\n </Comment>\n <Table printTitle=\"1\">\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Best Parameters\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter 
Estimation,Reference=Best Value\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Timer=CPU Time\"/>\n <Object cn=\"CN=Root,Vector=TaskList[Parameter Estimation],Problem=Parameter Estimation,Reference=Function Evaluations\"/>\n </Table>\n </Report>\"\"\"\n ).substitute(report_key=report_key, report_name=report_name)\n report = etree.XML(report_string)\n \n listOfReports.append(report) \n else:\n raise Exception('Unknown report type')", "def reports_public(self):\r\n return reports.ReportsPublic(self)", "def ReportNumbers(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('report_numbers', default)\n return [HEP.ReportNumberObject(i) for i in tmp]", "def report(self):\n print(f\"Water: {self.resources['water']}ml\")\n print(f\"Milk: {self.resources['milk']}ml\")\n print(f\"Coffee: {self.resources['coffee']}g\")", "def run(self):\n report_file = self.get_report_file_name()\n self.export_records_to_file(report_file)\n print(\"Report file ({}) generated.\".format(report_file))", "def print_report(self, obj):\n return mark_safe(obj.report)", "def final_report_file(self, instance):\r\n return admin_display_file(instance, 'final_report')", "def get_report(self) -> str:\n return self.diagnostics.get_report()", "def report(self):\n print()\n print(\"%-15s %-25s %s\" % (\"Class\", \"Name\", \"File\"))\n print(\"%-15s %-25s %s\" % (\"-----\", \"----\", \"----\"))\n for m in sorted(self.flatten(), key=lambda n: n.identifier):\n print(\"%-15s %-25s %s\" % (type(m).__name__, m.identifier, m.filename or \"\"))", "def report(self, kind=\"summary\"):\r\n header = [\"identity\", \"name\",\r\n \"cost\", \"leadtime\"]\r\n data = zip(self.idents, self.names,\r\n self.costs, self.leadtimes)\r\n cost = 0.0\r\n leadtime = 0.0\r\n defective = 0\r\n for _, _, price, lead in data:\r\n try:\r\n cost += float(price)\r\n leadtime = max(float(lead), leadtime)\r\n except TypeError:\r\n defective += 1\r\n except ValueError:\r\n defective += 1\r\n\r\n if kind == \"report\":\r\n verbose = [dict(zip(header, record)) for record in data]\r\n report = {\"detail\": verbose,\r\n \"cost\": cost, \"leadtime\": leadtime,\r\n \"defective\": defective}\r\n else:\r\n report = {\"cost\": cost, \"leadtime\": leadtime,\r\n \"defective\": defective}\r\n\r\n return report", "def CompileReport(self, mediator):\n return", "def legacy_reporter(self):\n logging.info('Creating database-friendly summary report')\n header = '{}\\n'.format(','.join(self.legacy_headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SequencingDate\n data += GenObject.returnattr(sample.run, 'Date')\n # Analyst\n data += GenObject.returnattr(sample.run, 'InvestigatorName')\n # Legacy ConFindr clean/contaminated call\n data += 'ND,'\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 
'std_insert',\n number=True)\n # AverageCoverageDepth\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a ND for each missing result\n if len(gene_set) < 7:\n data += (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += 
GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # NumClustersPF\n data += GenObject.returnattr(sample.run, 'NumberofClustersPF')\n # Percentage of reads mapping to PhiX control\n data += GenObject.returnattr(sample.run, 'phix_aligned')\n # Error rate calculated from PhiX control\n data += GenObject.returnattr(sample.run, 'error_rate')\n # LengthForwardRead\n data += GenObject.returnattr(sample.run, 'forwardlength',\n number=True)\n # LengthReverseRead\n data += GenObject.returnattr(sample.run, 'reverselength',\n number=True)\n # Real time strain\n data += GenObject.returnattr(sample.run, 'Description')\n # Flowcell\n data += GenObject.returnattr(sample.run, 'flowcell')\n # MachineName\n data += GenObject.returnattr(sample.run, 'instrument')\n # PipelineVersion\n data += self.commit + ','\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # cgMLST\n try:\n if type(sample.cgmlst.sequencetype) is list:\n if sample.cgmlst.sequencetype:\n cgmlst_seq_type = ';'.join(sorted(sample.cgmlst.sequencetype)).rstrip(';') + ','\n else:\n cgmlst_seq_type = 'ND,'\n else:\n cgmlst_seq_type = GenObject.returnattr(sample.cgmlst, 'sequencetype')\n # cgmlst_seq_type = cgmlst_seq_type if cgmlst_seq_type != 'ND,' else 'new,'\n data += cgmlst_seq_type\n except AttributeError:\n data += 'ND,'\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with open(os.path.join(self.reportpath, 'legacy_combinedMetadata.csv'), 'w') as metadatareport:\n 
metadatareport.write(header)\n metadatareport.write(cleandata)", "def get_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'date_start': self.date_start,\n 'date_end': self.date_end,\n },\n }\n\n # use `module_name.report_id` as reference.\n # `report_action()` will call `_get_report_values()` and pass `data` automatically.\n return self.env.ref('base_enh.recap_report').report_action(self, data=data)", "def request(self, **kwargs):\n if not hasattr(self, kwargs['report']):\n raise AttributeError(f'Report {kwargs[\"report\"]} not exist')\n report_name = kwargs.pop('report')\n return getattr(self, report_name)(**kwargs)", "def __init__(self, report: Report, images: list[Image] = None) -> None:\n super().__init__(report, images)", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def main():\n r = ReportHelper()\n today = dt.today()\n\n start_date = (today - timedelta(days=1)).strftime('%Y-%m-%d')\n end_date = today.strftime('%Y-%m-%d')\n response, ingestion_results = r.get_report(start_date, end_date, 'daily')\n logger.debug('Daily report data from {s} to {e}'.format(s=start_date, e=end_date))\n logger.debug(json.dumps(response, indent=2))\n logger.debug(json.dumps(ingestion_results, indent=2))\n\n if time_to_generate_monthly_report(today):\n last_day_of_prev_month = date(today.year, today.month, 1) - timedelta(days=1)\n last_month_first_date = last_day_of_prev_month.strftime('%Y-%m-01')\n last_month_end_date = last_day_of_prev_month.strftime('%Y-%m-%d')\n response, ingestion_results = r.get_report(last_month_first_date,\n last_month_end_date,\n 'monthly')\n logger.debug('Monthly report data from {s} to {e}'.format(s=start_date, e=end_date))\n logger.debug(json.dumps(response, indent=2))\n\n return response", "def report(self, result):\n raise NotImplementedError", "def _create_report_record(self, trade, common_object, reset_period,\n nominal, provision, short_end_rate, forward_rate):\n pass", "def report_header(self):\n pass", "def generate_xml_report(self, parser, data, objects,context=None):\n raise NotImplementedError()", "def register_reports(self):\n from ckanext.qa import reports\n return [reports.openness_report_info]", "def __init__(self):\n self.export_name = \"costreport\"\n self.container = \"test_container\"\n self.directory = \"cost\"\n self.test_date = datetime(2019, 8, 15)\n self.month_range = utils.month_date_range(self.test_date)\n self.report_path = f\"{self.directory}/{self.export_name}/{self.month_range}\"\n self.export_uuid = \"9c308505-61d3-487c-a1bb-017956c9170a\"\n self.export_file = f\"{self.export_name}_{self.export_uuid}.csv\"\n self.export_etag = \"absdfwef\"\n self.last_modified = DateAccessor().today()\n self.export_key = f\"{self.report_path}/{self.export_file}\"\n self.bad_test_date = datetime(2019, 7, 15)\n self.bad_month_range = utils.month_date_range(self.bad_test_date)\n self.bad_report_path = f\"{self.directory}/{self.export_name}/{self.bad_month_range}\"", "def init_report(self, 
report):\n super(InformedPlannerHierarchy, self).init_report(report)\n if True:\n self.cover.draw_embeddings(report.section('embeddings'))\n \n self.display_distancetree(report.section('distancetree'))", "def reports_cli():", "def report(self):\n\n print(str(self))\n total_str = \"{0:,.2f}\".format(self.total)\n\n print()\n report = \" Taxes Breakdown\\n\"\n report += \"==================================\\n\"\n for i, t in enumerate(self.tax_amounts):\n t_amount_str = \"{0:,.2f}\".format(t.amount)\n t_rate_str = \"{0:,.2f}\".format(t.rate)\n t_tax_str = \"{0:,.2f}\".format(t.tax)\n report += \"{}{} X {} ={}{}\\n\".format(\" \" * (12 - len(t_amount_str)), t_amount_str, t_rate_str,\n \" \" * (13 - len(t_tax_str)), t_tax_str)\n\n report += \"----------------------------------\\n\"\n report += \" Total ={}{}\".format(\" \" * (13 - len(total_str)), total_str)\n print(report)", "def Report(self):\n return True", "def __init__(self):\n self._tool_data = {}\n self.feedback = []\n self.ignored_feedback = []\n self.suppressions = {}\n self.suppressed_labels = {}\n self.hiddens = set()\n self.groups = []\n self.group = None\n self.group_names = {}\n self.hooks = {}\n self.class_hooks = {}\n self.submission = None\n self.format = Formatter()\n self.result = None\n self.resolves = []\n self.overridden_feedbacks = set()\n log.debug(\"New Pedal Report created.\")", "def default(cls, ):\n return cls.fromMetrics()", "def default(cls, ):\n return cls.fromMetrics()", "def report_types():\n return [ReportClass for name, ReportClass in REPORT_REGISTRY.items() if name != \"BaseReport\"]", "def report_type(self, report_type):\n\n self._report_type = report_type", "def report_type(self, report_type):\n\n self._report_type = report_type", "def report_type(self, report_type):\n\n self._report_type = report_type", "def test_report(self):\n self.assertEqual('Product heeft 0 dagen.', self.__metric.report())", "def reportProperties():", "def _export(self, report_type):\n model = self.env['report_trial_balance_contabilidad_cfdi']\n report = model.create(self._prepare_report_trial_balance())\n report.compute_data_for_report()\n return report.print_report(report_type)", "def _get_report(self) -> Report:\n\n logging.info(\"Generating execution report\")\n\n report = Report(additional_data=self.additional_data)\n\n for law in self.laws:\n report.passed += self.__get_rule_items(law, law.passed_rules)\n report.failed += self.__get_rule_items(law, law.failed_rules)\n report.skipped += self.__get_rule_items(law, law.skipped_rules)\n\n logging.info(\"Execution report generated\")\n\n return report", "def _get_report(self, entry):\n # format the report\n description = self.FULL_MESSAGE_TEMPLATE.format(\n db=entry.get('db', 'n/a'),\n host=entry.get('@source_host'),\n client=entry.get('client', 'n/a'),\n query_time=entry.get('query_time', 'n/a'),\n method=entry.get('query_class'),\n query=entry.get('query'),\n entry=json.dumps(entry, indent=True),\n ).strip()\n\n # add a fake path to the class, to that we will try to make classifier attach a proper component\n class_name = entry.get('query_class', '').split(':')[0]\n description += '\\n\\nPossible source file:\\n* /extensions/wikia/{}:1'.format(class_name)\n\n return Report(\n summary='[{method}] Long running query was killed by mysql-killer'.format(method=entry.get('query_class')),\n description=description,\n label=self.REPORT_LABEL\n )", "def print_report(report_data):\n\n header = '\\nPROPERTY SUMMARY FOR \"{}\"\\n'.format(report_data['property_name'])\n print('* ' * 
(len(header) // 2))\n print(header)\n\n print('Property Type:'.ljust(25), report_data['property_type'])\n print('Number of Bedrooms:'.ljust(25), report_data['rooms'])\n print('Number of Bathrooms:'.ljust(25), report_data['bathrooms'])\n\n not_found = ['n/a'] # Print this if nothing found for category\n\n print('\\nAMENITIES:')\n\n for amenity in report_data['general_amenities']:\n print(' * ', amenity)\n\n print('\\nFAMILY AMENITIES:')\n\n for amenity in report_data['family_amenities'] or not_found:\n print(' * ', amenity)\n\n print('\\nSAFETY FEATURES:')\n\n for amenity in report_data['safety_feats'] or not_found:\n print(' * ', amenity)\n\n print('\\n')\n\n return", "def GenerateReport(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('generateReport', payload=payload, response_object=None)", "def initialize_report(output_dir,\n subject_name='Subject',\n log=True,\n filename='report',\n prepreproc_undergone=\"\",\n dcm2nii=False,\n deleteorient=False,\n fwhm=None, anat_fwhm=None,\n slice_timing=False,\n realign=False,\n coregister=False,\n coreg_func_to_anat=False,\n segment=False,\n normalize=False,\n dartel=False,\n command_line=None,\n has_func=True\n ):\n report_outfile = os.path.join(output_dir, '{}.html'.format(filename))\n\n report_dict = {}\n report_dict['preproc_undergone'] = generate_preproc_steps_docstring(\n dcm2nii=dcm2nii,\n deleteorient=deleteorient,\n slice_timing=slice_timing,\n realign=realign,\n coregister=coregister,\n segment=segment,\n normalize=normalize,\n fwhm=fwhm, anat_fwhm=anat_fwhm,\n dartel=dartel,\n coreg_func_to_anat=coreg_func_to_anat,\n prepreproc_undergone=prepreproc_undergone,\n has_func=has_func\n )\n report_dict['subject_name'] = subject_name\n report_dict['start_time'] = strftime(\"%d-%b-%Y %H:%M:%S\", gmtime())\n report_dict['end_time'] = \"STILL RUNNING...\"\n report_text = embed_in_HTML('report_template.html', report_dict)\n report_HTML = HTMLDocument(report_text).save_as_html(report_outfile)\n\n if log:\n # create a separate HTML with all the logs\n log_outfile = os.path.join(output_dir, '{}_log.html'.format(filename))\n log_HTML = HTMLDocument(\"<html><body>\").save_as_html(log_outfile)\n return report_outfile, log_outfile\n else:\n return report_outfile, None", "def main():\n reportSample = CompatibilityReportSample()\n reportSample.run()", "def create_report(self, data_def_request):\n\n report = Report(self.appresponse)\n report.add(data_def_request)\n report.run()\n return report", "def generate_report(self, report_folder):\n if not self.thrift_client:\n raise IllegalMethodCallException(\"generate_report() must be called after check_layout()\")\n logger.info(\"Generating reports in \" + report_folder)\n self.thrift_client.generate_report(report_folder)\n self.thrift_client.quit_service_if_inactive()", "def default_start(self, data):\n return {\n 'message': \"This wizard will export shipment status for all the \" +\n \"shipments related to this store view. 
To export tracking \" +\n \"information also for these shipments please check the \" +\n \"checkbox for Export Tracking Information on Store View.\"\n }", "def interim_report_file(self, instance):\r\n return admin_display_file(instance, 'interim_report')", "def __set_report_path(self):\n self.report_path = os.path.join(self.get_report_path(), \"cyclomatic_report\")\n Path(self.report_path).mkdir(parents=True, exist_ok=True)", "def initialize_report(report_type, start_date, end_date, start_letter=None, end_letter=None):\r\n for item in REPORT_TYPES:\r\n if report_type in item:\r\n return item[1](start_date, end_date, start_letter, end_letter)\r\n raise ReportTypeDoesNotExistException" ]
[ "0.7427176", "0.7222436", "0.7195777", "0.71178955", "0.7061699", "0.6907026", "0.68175685", "0.67216134", "0.67009014", "0.6695978", "0.66680425", "0.66522896", "0.6502363", "0.64432687", "0.64130306", "0.6410636", "0.636051", "0.63515186", "0.6334159", "0.6313862", "0.63041276", "0.6271433", "0.61974555", "0.6176468", "0.61383027", "0.6131349", "0.6006281", "0.5928357", "0.5921651", "0.5905903", "0.5890308", "0.5883383", "0.58818555", "0.58799744", "0.5865912", "0.5836839", "0.5828177", "0.58257824", "0.58226883", "0.58113533", "0.5810696", "0.5795373", "0.577375", "0.57683915", "0.5760675", "0.57605666", "0.57484406", "0.5747796", "0.5743767", "0.5741701", "0.57401305", "0.5732454", "0.57174695", "0.56972766", "0.56851107", "0.56768", "0.56677616", "0.5663178", "0.56584895", "0.56563413", "0.5654164", "0.5643043", "0.56429094", "0.5640839", "0.56380856", "0.563441", "0.5626753", "0.5626705", "0.5621709", "0.5620629", "0.56182003", "0.5613153", "0.56061053", "0.56034833", "0.5595978", "0.5595592", "0.55947727", "0.55819774", "0.5572628", "0.555061", "0.555061", "0.5541188", "0.55405", "0.55405", "0.55405", "0.55358547", "0.55338633", "0.55331504", "0.55318165", "0.55260664", "0.5523468", "0.552274", "0.5515852", "0.5512221", "0.55084485", "0.54985297", "0.54984736", "0.54961795", "0.5493543", "0.5492469" ]
0.58354574
36
Test that the text is correct.
def test_changed_status_text(self): scale = "count" metric1 = dict( type="metric_type", name="Metric", unit="units", scale=scale, recent_measurements=[ dict(count=dict(value=0, status="near_target_met")), dict(count=dict(value=42, status="target_not_met")), ], ) metric2 = dict( type="metric_type", name="Metric", unit="units", scale=scale, recent_measurements=[ dict(count=dict(value=5, status="target_met")), dict(count=dict(value=10, status="target_not_met")), ], ) metric_notification_data1 = MetricNotificationData(metric1, self.data_model, "status_changed") metric_notification_data2 = MetricNotificationData(metric2, self.data_model, "status_changed") notification = Notification( self.report, [metric_notification_data1, metric_notification_data2], "destination_uuid", {} ) text = build_notification_text(notification) self.assertEqual( "[Report 1](https://report1) has 2 metrics that are notable:\n\n" "* Metric status is red (target not met), was yellow (near target met). Value is 42 units, was 0 units.\n" "* Metric status is red (target not met), was green (target met). Value is 10 units, was 5 units.\n", text, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_text(self, text):\n pass", "def assert_text(self,actual, expected):\n actual_stripped = actual.rstrip('/')\n assert expected == actual_stripped, \"The text does not match:\\n\\tExpected : {0} \\n\\tActual : {1}\\n\".format(expected, actual_stripped)", "def test_text(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text), self.text)", "def verify_text(self, expected_text: str, *locator):\n actual_text = self.driver.find_element(*locator).text\n assert actual_text == expected_text, f'Error. Expected {expected_text} does not match actual {actual_text}'", "def test_plain_text():\n source_file = os.path.join(_TESTS_DIR, 'plain_text_sample.txt')\n file_reports = psca.analyze([source_file], _SETTINGS_FILE,\n profile='test_04')\n reports = file_reports[0].reports\n # The elements of the found_errors and expected_errors sets are\n # tuples with two elements: (<rule code>, <line>)\n found_errors = {(r.rule_code, r.line) for r in reports}\n expected_errors = {\n (4, 7),\n (5, None),\n (6, 3),\n }\n undetected_errors = expected_errors - found_errors\n assert len(undetected_errors) == 0, \\\n f'Undetected errors: {undetected_errors}'\n unexpected_errors = found_errors - expected_errors\n assert len(unexpected_errors) == 0, \\\n f'Unexpected errors: {unexpected_errors}'", "def test_text_field():", "def assertText(self,content,expected_text,description=\"\"): \n self.assertTrue(expected_text in content,\n \"expected to find '{0}' but found '{1}' instead.\\\n Attemted action: {2}\".format(expected_text, \n content,\n description))", "def verify_text_present(self, text, msg=None):\r\n try:\r\n self.assert_text_present(text, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def testText(self):\n lc = self.CreateConsole()\n contents = \"\"\n self.assertEqual(contents, lc.GetText())\n for str in ('a', 'foo', '\\n\\n\\n', 'bar\\nbaz\\n choke choke zapf'):\n contents += str\n lc.AppendText(str)\n self.assertEqual(contents, lc.GetText())", "def verify_text(self, expected_text: str, *locator):\n e = self.driver.find_element(*locator)\n actual_text = e.text\n assert expected_text == actual_text, f\"Expected {expected_text} does not match actual {actual_text}\"", "def assert_text(self, path, contents):\n assert isinstance(contents, text_type)\n data = self.fs.gettext(path)\n self.assertEqual(data, contents)\n self.assertIsInstance(data, text_type)", "def test_textlines_field():", "def test_text(self):\n result = self._do_output(o.TextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def testIsText(self):\n parser = text_parser.PyparsingSingleLineTextParser()\n\n bytes_in = b'this is My Weird ASCII and non whatever string.'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = 'Plaso Síar Og Raðar Þessu'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'\\x01\\\\62LSO\\xFF'\n self.assertFalse(parser._IsText(bytes_in))\n\n bytes_in = b'T\\x00h\\x00i\\x00s\\x00\\x20\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii Open then...\\x00\\x99\\x23'\n self.assertFalse(parser._IsText(bytes_in))", "def 
test_initialization_of_homework_text():\n assert oop_hw.text == \"Learn OOP\"", "def test_text_is_correct(app):\n rv = app.test_client().post('/tokenize', \n json={\n 'text': \"I still haven't found what i'm looking for\",\n 'lang': 'en'\n })\n json_data = rv.get_json()\n tokens = json_data['tokens']\n assert tokens == ['I', 'still', 'have', 'not', 'found', 'what', 'i', 'am', 'looking', 'for']", "def test_third_equal(self):\n self.assertEqual(heaviest_word(\"take me to semynak\"), \"semynak\")", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False", "def test_valid_text_str(self):\n f = lws.valid_text\n assert f('string', r'[a-z]*') is True\n assert f('string', r'string') is True\n assert f('string', r'[0-9]*') is False\n assert f('', r'.*') is False\n assert f('abcde', lambda x: 'e' in x) is True\n assert f('abcde', lambda x: 'f' in x) is False", "def test_same_sentence_check(self):\n block = get_text(SAMPLE_SENTENCE)\n self.assertTrue(same_sentence_check(block, 0, 98))\n self.assertFalse(same_sentence_check(block, 166, 168))", "def assertTextEq(self, a, b, msg=None):\n trans = str.maketrans({\n \" \": \"\",\n \"\\n\": \"\"\n })\n\n if isinstance(a, iAntlr4GramElem):\n a = a.toAntlr4()\n\n if isinstance(b, iAntlr4GramElem):\n b = b.toAntlr4()\n\n a = a.translate(trans)\n b = b.translate(trans)\n \n self.assertEqual(a, b, msg=msg)", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def test_training_content(self):\n self.assertIsInstance(self.one_off_training.content, str)\n self.assertEqual(self.one_off_training.content, \"1h d'endurance\")", "def test_email_text():\n new_text = mailroom.compose_email(\"Willie Nelson\", 12.34)\n reference_text = \"\\nDear Willie Nelson,\\n\\\nThank you for your generous gift of $12.34! 
It will help Local Charity\\\n achieve our mission.\\n\\\nBest regards,\\n\\\nLocal Charity\\n\\n\"\n assert new_text == reference_text", "def test_lessthan(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text_lessthan_noencd), self.text_lessthan_encode)", "def test_gameAddText(self):\n # this is tested graphically, it is UI\n pass", "def test_prep_textarea(self):\n pass", "def test_validate_text_input(self):\n region = \"Bayern\"\n region_false = \"B\"\n a = validate_text_input(region)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_text_input(region_false)\n self.assertTrue(\"Kein gültiger Wert für Region\" or\n \"No valid value for region\" in\n str(context.exception))", "def test_issue859(en_tokenizer, text):\n doc = en_tokenizer(text)\n assert doc.text == text", "def test_get_lyrics_invalid_format(bot):\n assert get_lyrics('asdf', 1) == 'Invalid format!'", "def test_good_transcript(self):\r\n good_sjson = _create_file(content=textwrap.dedent(\"\"\"\\\r\n {\r\n \"start\": [\r\n 270,\r\n 2720\r\n ],\r\n \"end\": [\r\n 2720,\r\n 5430\r\n ],\r\n \"text\": [\r\n \"Hi, welcome to Edx.\",\r\n \"Let&#39;s start with what is on your screen right now.\"\r\n ]\r\n }\r\n \"\"\"))\r\n\r\n _upload_sjson_file(good_sjson, self.item.location)\r\n self.item.sub = _get_subs_id(good_sjson.name)\r\n\r\n text, filename, mime_type = self.item.get_transcript()\r\n\r\n expected_text = textwrap.dedent(\"\"\"\\\r\n 0\r\n 00:00:00,270 --> 00:00:02,720\r\n Hi, welcome to Edx.\r\n\r\n 1\r\n 00:00:02,720 --> 00:00:05,430\r\n Let&#39;s start with what is on your screen right now.\r\n\r\n \"\"\")\r\n\r\n self.assertEqual(text, expected_text)\r\n self.assertEqual(filename[:-4], self.item.sub)\r\n self.assertEqual(mime_type, 'application/x-subrip; charset=utf-8')", "def test_first_equal(self):\n self.assertEqual(heaviest_word(\"man i need a taxi up to ubud\"), \"taxi\")", "def test_text(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_text')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_text ' \\\n '( value TEXT NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_text VALUES (%s)'\n for i in range(10):\n item = random_string(100000)\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_text'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, unicode)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_text')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_text')\n cursor.execute(query)\n conn.commit()", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_read_text(pdf_path):\n pdf_reader = PdfReader(path=pdf_path)\n text = pdf_reader.ocr_text()\n\n # We hard code this comparison to keep track of all changes to this metric\n assert pdf_reader.mean_confidence == 89\n assert pdf_reader.page_confidences == [86, 91]\n\n # Check if we have two pages seperated by pagebreaks\n assert len(text.split('\\f')) == 2\n\n # The same content can be extracted from the pages property\n assert '\\f'.join(pdf_reader.pages) == text\n\n # Content on the first page (important that this is at the beginning)\n assert 'Norwegian 
University of Science and Technology' in text[:50]\n\n # Content on second page (important that this is at the end)\n assert 'two requirements' in text[-50:]\n\n # The double-f in affine is hard for bad OCR algorithms\n assert 'affine' in text", "def test_text_roundtrip():\n for text in (\"\", \"a\", \"Hello, world!\", \"9\" * 1000):\n assert text == String.read(String.to_bytes(text))", "def test_analysis_screen_with_clean_text(client, text_to_analyse):\n path = reverse('text_analysis:analysis')\n response = client.get(path, {'fulltext': text_to_analyse})\n assert response.status_code == 200, 'Should return an `OK` status code'", "def verify(self):\n\n # tekstlig testing om koden fungerer\n text = self.klar_tekst_start + \" ble sendt til mottaker som krypteringen \" + \\\n self.crypto + \".\\nMottaker dekrypterte dette til \" + self.klar_tekst_slutt\n\n return text", "def test_is(self):\n invalid = self.TDTT()\n self.check_invalid_is(invalid)\n\n valid = self.TDTT(when=self.txt_when)\n self.check_valid_is(valid)", "def test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")", "def test_words_with_numbers(self):\n\n test_string = \"1. FC Köln\"\n test_anagram = \"anagram\"\n with pytest.raises(ValueError) as exc_info:\n is_anagram(test_string, test_anagram)\n expected_error_msg = \"should only contain letters!\"\n assert exc_info.match(expected_error_msg)", "def test_analyze_text():\n # Create a lexer instance and analyze a text with some rules\n new_dict = lex._lexer(None, None).analyze_text(\n \"test\", [lex_bases.rule(\"JUMP_LINE\", r\"\\n\"), lex_bases.rule(\"TEST\", r\"test\")]\n )\n\n # Check if the returned values are correct\n assert (\n new_dict[\"token\"] == lex_bases.token(\"TEST\", \"test\")\n and new_dict[\"fit_with_a_rule\"]\n and new_dict[\"rule_that_matched\"] == lex_bases.rule(\"TEST\", r\"test\")\n )", "def test_second_equal(self):\n self.assertEqual(heaviest_word(\"what time are we climbing up to the volcano\"), \"volcano\")", "def test_from_text(self):\n rkeyring = dns.tsigkeyring.from_text(text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def assert_text_present(self, text, msg=None):\r\n e = driver.find_element_by_tag_name('body')\r\n assert text in e.text", "def test_analysis_screen_with_dirty_text(client, dirty_text):\n path = reverse('text_analysis:analysis')\n response = client.get(path, {'fulltext': dirty_text})\n assert response.status_code == 200, 'Should return an `OK` status code'", "def test_character_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[1], 133)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def teststring(self):\n self.assertRaises(palindrome.NotStringError,palindrome.palin, 4)", "def validate(self, text):\n raise NotImplementedError()", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 132)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Four of a Kind Ts')", "def check_correctness(self, expected, got):\n expected_lines = expected.strip().splitlines()\n got_lines = got.strip().splitlines()\n if len(got_lines) != len(expected_lines):\n return False\n else:\n for exp, got in zip(expected_lines, got_lines):\n if self.params['strictwhitespace']:\n if exp.rstrip() != got.rstrip():\n return 
False\n else:\n if exp.strip().split() != got.strip().split():\n return False\n return True", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Two Pairs As and 4s')", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 970)", "def test_string():", "def test_snippet_beginning_nonletter(self):\n message = Message(clean_text=u\"!I already know what this will be!!!!!\")\n self.assertEqual(\n message.snippet,\n 'I already know what...'\n )", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def test_get_survey_as_tex(self):\n generic = str(self.generic)\n should_contain = [\n \"documentclass[11pt]{article}\",\n \"title{My title}\",\n \"Test management footer.\",\n \"Aèbc?\",\n \"Bècd?\",\n \"Cède?\",\n \"\",\n ]\n for text in should_contain:\n self.assertIn(text, generic)\n specific = str(self.specific)\n should_contain = [\n \"documentclass[11pt]{report}\",\n \"title{My title}\",\n \"This is the footer.\",\n \"{Lorem ipsum dolor sit amët\",\n \"adipiscing} elit.'\",\n \"with 'K.' standing for 'Yës'\",\n \"'Nah' standing for 'No' or 'Whatever'\",\n ]\n for text in should_contain:\n self.assertIn(text, specific)", "def test_textCondition(self):\n xp = XPathQuery(\"/foo[text() = 'somecontent']\")\n self.assertEqual(xp.matches(self.e), True)", "def test_clean_description(self):\n text = '!@#$%^&*()_+1234567890-='\n self.assertEqual(sync.clean_description(text),\n '!@#$%^&*()_+1234567890-=')\n\n text = \"Darwin\\u00c2\\u00bfs Bulldog\"\n self.assertEqual(sync.clean_description(text), \"Darwin's Bulldog\")\n\n text = \"\\n\\r\\nSome<BR><br /></BR>Text\"\n self.assertEqual(sync.clean_description(text), \"\\n\\r\\nSome\\n\\nText\")", "def test_text_editor():\n assert chap2.text_editor()", "def expected_rubbish(self):", "def isValidTest(self):\n if not self.hasError():\n return False\n distance = dameraulevenshtein(self.word, self.error) \n if(distance > 1):\n return False\n regex = '.*[^a-zA-Z].*'\n if re.match(regex, self.word) or re.match(regex, self.error):\n return False\n return True", "def test_index(self):\n\n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(\"<h2>Please Write your Text</h2>\", result.data)", "def test_explained_text(self):\n result = self._do_output(o.ExplainedTextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \" * long text\\n\"\n \" * You can ignore this problem with --ignore mock_msg\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)", "def test_sanity(self) -> None:\n if self.report.headlines:\n return\n\n if self.report.document.paragraphs:\n self.add_error(\n \"Rubrikerna i dokumentet är felformaterade eller saknas. \"\n \"Rubrikerna ska vara skrivna i versaler och ha samma \"\n \"typsnitt, stil och storlek som brödtexten. 
\"\n \"Rubriker avslutas med radbrytning.\"\n )\n\n if not self.report.document.paragraphs:\n self.add_error(\"Ditt dokument är antigen tomt eller i fel format.\")", "def test_for_correct_updating_one(self):\r\n assert increase_sentence_count_if_we_should('one. two. three.', 3, 'a') == (4, 'one. two. three.a')", "def test_force_text_exception(self):\n class MyString(object):\n def __str__(self):\n return b'\\xc3\\xb6\\xc3\\xa4\\xc3\\xbc'\n\n __unicode__ = __str__\n\n # str(s) raises a TypeError on python 3 if the result is not a text type.\n # python 2 fails when it tries converting from str to unicode (via ASCII).\n exception = TypeError if six.PY3 else UnicodeError\n self.assertRaises(exception, force_text, MyString())", "def test_text_resource(self):\n Repository = CTSCapitainsLocalResolver([\"./tests/testing_data/farsiLit\"])\n text, metadata = Repository.__getText__(\"urn:cts:farsiLit:hafez.divan.perseus-eng1\")\n self.assertEqual(\n len(text.citation), 4,\n \"Object has a citation property of length 4\"\n )\n self.assertEqual(\n text.getTextualNode(Reference(\"1.1.1.1\")).export(output=Mimetypes.PLAINTEXT),\n \"Ho ! Saki, pass around and offer the bowl (of love for God) : ### \",\n \"It should be possible to retrieve text\"\n )", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Royal Flush')", "def verify(self, text):\n\n components = text.split(self.HASHSEP)\n if len(components) != 2:\n print 'verify: cannot parse text [%s]' % text\n return False\n\n body, digest = components\n check = self.digest(body)\n\n if check == digest:\n return True\n else:\n print 'verify: Expected [%s] got [%s] text [%s]' % (\n digest, check, text)\n return False", "def test_still_needing_correction(self):\n errors = self.response.data[\"errors\"]\n errors[0][\"correction\"] = \"asdasd\"\n self.response2 = self.client.post(\n reverse(\"correct\"), {\"id\": 1, \"errors\": errors}, format=\"json\"\n )\n self.assertEqual(\n self.response2.data[\"errors\"][0][\"message\"],\n \"The token was not identified as Hebrew\",\n )", "def test_text_default(self):\n r = Review()\n self.assertEqual(\"\", r.text)", "def test_str_post(self):\n expected_str = self.post.text[:15]\n self.assertEqual(expected_str, str(self.post))", "def test_odd_title():\n test_string = \"And now to something completely different\"\n assert (test_string, \"\") == parse_title(test_string)", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def test_score_text1(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tobj_ut, _ = test.score_text(matches)\n\t\tself.assertEqual(obj_ut, -1)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Straight Flush T high')", "def test_password_is_okay():\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('qqqqqqqq') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\t\"\"\"test that valid passwords work\"\"\"\n\tassert password_is_ok('Q8qqqqqqqq') == True\n\tassert 
password_is_ok('q8qqqqqqqq') == True\n\tassert password_is_ok('Qqqqqqqqqq') == True\n\tassert password_is_ok('qqqqqqqqqq') == True", "def test_invalid_text_search(aquarius_instance):\n text = \"foo_text\"\n with pytest.raises(ValueError):\n aquarius_instance.text_search(text=text, sort=\"foo_sort\")", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'One Pair 4s')", "def test_textAsEvent(self):\n self.assertEquals(\n textAsEvent(u\"Hello, World!\"),\n b\"data: Hello, World!\\n\\n\"\n )", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Straight High card 5')", "def _test_text(self, url, content, buffering):\n # read(-1), readable(), seekable()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n self.assertTrue(tf.readable())\n self.assertTrue(tf.seekable())\n self.assertEqual(tf.read(), content)\n self.assertEqual(tf.read(), \"\")\n\n # read(10)\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n chunk = tf.read(10)\n result += chunk\n if len(chunk) < 10:\n break\n self.assertEqual(result, content)\n\n # readline(), seek(), tell()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n rpos = tf.tell()\n tf.seek(0)\n tf.seek(rpos)\n chunk = tf.readline()\n result += chunk\n if len(chunk) == 0:\n break\n self.assertEqual(result, content)", "def test_valid(self):\n template = '{0} just right {1}'\n value_count = 2\n try:\n validate_str_substitution(template, value_count)\n except ValidationError:\n self.fail('Name raised ValidationError unexpectedly')", "def test_kyc_post_legal(self):\n pass", "def test_unusual_misc():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n sentences = \"{:C}\".format(doc).split(\"\\n\\n\")\n assert len(sentences) == 2\n sentence = sentences[0].split(\"\\n\")\n assert len(sentence) == 14\n\n for word in sentence:\n pieces = word.split(\"\\t\")\n assert len(pieces) == 1 or len(pieces) == 10\n if len(pieces) == 10:\n assert all(piece for piece in pieces)", "def test_empty_string(self):\n string = \"\"\n expected = \"\"\n self.assertEqual(transliterate(string), expected)", "def test_textConditionUnicode(self):\n xp = XPathQuery(u\"//*[text()='\\N{SNOWMAN}']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.quux])", "def test_from_alt_text(self):\n rkeyring = dns.tsigkeyring.from_text(alt_text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_text_multiline(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(text=\"abc\\ndef\")), \":warning: **abc**\\ndef\")", "def test_text(self):\n args = [\"hello world\"]\n namespace = self.parser.parse_args(args)\n self.assertEqual(namespace.text, \"hello world\")", "def testRandomWord(self):\n word1 = self.searcher.randomWord()\n word2 = self.searcher.randomWord()\n self.assertTrue(len(word1) > 1, 'Word length too short')\n self.assertTrue(len(word2) > 1, 'Word length too short')\n self.assertNotEqual(word1, word2, 'Found the same word')", "def test_no_errors(self):\n test_error = \"\\r\\n--------------------------------------------------------------------\\r\\n\"\\\n \"Your code has been rated at 10.00/10 (previous run: 9.33/10, +0.67)\"\n\n self.assertEqual(\n format_errors(test_error),\n None\n )", "def test_with_single_space(self):\n 
self.assertEqual(escapespaces('Hi there'),\n 'Hi there')", "def test_art_from_taste_space(self):", "def test_invalid_tokens(self):\n self.assertTrue(1 + 1)", "def test_str_title(self):\n post = self.post\n string = post.__str__()\n expected_string = post.text[:15]\n self.assertEqual(string, expected_string)" ]
[ "0.7780006", "0.71805084", "0.7163137", "0.69507134", "0.69128454", "0.6879445", "0.6878319", "0.68413955", "0.6837145", "0.6819704", "0.6761079", "0.67541283", "0.67370874", "0.6677631", "0.6638475", "0.6612591", "0.6599315", "0.6586486", "0.6543073", "0.65285075", "0.65231675", "0.6502826", "0.6502498", "0.64943993", "0.6465871", "0.64485925", "0.64285845", "0.64116067", "0.64033747", "0.63913935", "0.6383951", "0.6382173", "0.6381052", "0.63672763", "0.6361509", "0.63500655", "0.6341567", "0.6323626", "0.63162434", "0.6315863", "0.63126516", "0.6310868", "0.62901634", "0.62880754", "0.6269692", "0.6253142", "0.6242243", "0.62368524", "0.6226166", "0.6225162", "0.6219212", "0.61973494", "0.6186998", "0.61853606", "0.61806804", "0.6177581", "0.61732054", "0.61701363", "0.61558205", "0.6146815", "0.61440957", "0.6143074", "0.61406374", "0.6133817", "0.613119", "0.6125797", "0.61225164", "0.61220133", "0.6113499", "0.6108356", "0.61062855", "0.60927826", "0.6084598", "0.60820717", "0.6081194", "0.6079038", "0.6076694", "0.6070627", "0.6065014", "0.6049391", "0.604327", "0.6039355", "0.60364467", "0.6034222", "0.603235", "0.6026726", "0.60235447", "0.6022977", "0.6013017", "0.6008269", "0.6007533", "0.6006693", "0.6003491", "0.5998224", "0.5997961", "0.59892315", "0.5988417", "0.59788567", "0.5976028", "0.597406", "0.5965371" ]
0.0
-1
Test that the text is correct.
def test_unchanged_status_text(self): scale = "count" metric1 = dict(type="metric_type", name="Metric", unit="units", scale=scale, recent_measurements=[dict(count=dict(value=0, status="near_target_met")), dict(count=dict(value=42, status="near_target_met"))]) metric2 = dict(type="metric_type", name="Metric", unit="units", scale=scale, recent_measurements=[dict(count=dict(value=5, status="target_met")), dict(count=dict(value=10, status="target_not_met"))]) metric_notification_data1 = MetricNotificationData(metric1, self.data_model, "status_long_unchanged") metric_notification_data2 = MetricNotificationData(metric2, self.data_model, "status_long_unchanged") notification = Notification(self.report, [metric_notification_data1, metric_notification_data2], "destination_uuid", {}) text = build_notification_text(notification) self.assertEqual( "[Report 1](https://report1) has 2 metrics that are notable:\n\n" "* Metric has been yellow (near target met) for three weeks. Value: 42 units.\n" "* Metric has been red (target not met) for three weeks. Value: 10 units.\n", text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_text(self, text):\n pass", "def assert_text(self,actual, expected):\n actual_stripped = actual.rstrip('/')\n assert expected == actual_stripped, \"The text does not match:\\n\\tExpected : {0} \\n\\tActual : {1}\\n\".format(expected, actual_stripped)", "def test_text(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text), self.text)", "def verify_text(self, expected_text: str, *locator):\n actual_text = self.driver.find_element(*locator).text\n assert actual_text == expected_text, f'Error. Expected {expected_text} does not match actual {actual_text}'", "def test_plain_text():\n source_file = os.path.join(_TESTS_DIR, 'plain_text_sample.txt')\n file_reports = psca.analyze([source_file], _SETTINGS_FILE,\n profile='test_04')\n reports = file_reports[0].reports\n # The elements of the found_errors and expected_errors sets are\n # tuples with two elements: (<rule code>, <line>)\n found_errors = {(r.rule_code, r.line) for r in reports}\n expected_errors = {\n (4, 7),\n (5, None),\n (6, 3),\n }\n undetected_errors = expected_errors - found_errors\n assert len(undetected_errors) == 0, \\\n f'Undetected errors: {undetected_errors}'\n unexpected_errors = found_errors - expected_errors\n assert len(unexpected_errors) == 0, \\\n f'Unexpected errors: {unexpected_errors}'", "def test_text_field():", "def assertText(self,content,expected_text,description=\"\"): \n self.assertTrue(expected_text in content,\n \"expected to find '{0}' but found '{1}' instead.\\\n Attemted action: {2}\".format(expected_text, \n content,\n description))", "def verify_text_present(self, text, msg=None):\r\n try:\r\n self.assert_text_present(text, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def testText(self):\n lc = self.CreateConsole()\n contents = \"\"\n self.assertEqual(contents, lc.GetText())\n for str in ('a', 'foo', '\\n\\n\\n', 'bar\\nbaz\\n choke choke zapf'):\n contents += str\n lc.AppendText(str)\n self.assertEqual(contents, lc.GetText())", "def verify_text(self, expected_text: str, *locator):\n e = self.driver.find_element(*locator)\n actual_text = e.text\n assert expected_text == actual_text, f\"Expected {expected_text} does not match actual {actual_text}\"", "def assert_text(self, path, contents):\n assert isinstance(contents, text_type)\n data = self.fs.gettext(path)\n self.assertEqual(data, contents)\n self.assertIsInstance(data, text_type)", "def test_textlines_field():", "def test_text(self):\n result = self._do_output(o.TextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def testIsText(self):\n parser = text_parser.PyparsingSingleLineTextParser()\n\n bytes_in = b'this is My Weird ASCII and non whatever string.'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = 'Plaso Síar Og Raðar Þessu'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'\\x01\\\\62LSO\\xFF'\n self.assertFalse(parser._IsText(bytes_in))\n\n bytes_in = b'T\\x00h\\x00i\\x00s\\x00\\x20\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii Open then...\\x00\\x99\\x23'\n self.assertFalse(parser._IsText(bytes_in))", "def 
test_initialization_of_homework_text():\n assert oop_hw.text == \"Learn OOP\"", "def test_text_is_correct(app):\n rv = app.test_client().post('/tokenize', \n json={\n 'text': \"I still haven't found what i'm looking for\",\n 'lang': 'en'\n })\n json_data = rv.get_json()\n tokens = json_data['tokens']\n assert tokens == ['I', 'still', 'have', 'not', 'found', 'what', 'i', 'am', 'looking', 'for']", "def test_third_equal(self):\n self.assertEqual(heaviest_word(\"take me to semynak\"), \"semynak\")", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False", "def test_valid_text_str(self):\n f = lws.valid_text\n assert f('string', r'[a-z]*') is True\n assert f('string', r'string') is True\n assert f('string', r'[0-9]*') is False\n assert f('', r'.*') is False\n assert f('abcde', lambda x: 'e' in x) is True\n assert f('abcde', lambda x: 'f' in x) is False", "def test_same_sentence_check(self):\n block = get_text(SAMPLE_SENTENCE)\n self.assertTrue(same_sentence_check(block, 0, 98))\n self.assertFalse(same_sentence_check(block, 166, 168))", "def assertTextEq(self, a, b, msg=None):\n trans = str.maketrans({\n \" \": \"\",\n \"\\n\": \"\"\n })\n\n if isinstance(a, iAntlr4GramElem):\n a = a.toAntlr4()\n\n if isinstance(b, iAntlr4GramElem):\n b = b.toAntlr4()\n\n a = a.translate(trans)\n b = b.translate(trans)\n \n self.assertEqual(a, b, msg=msg)", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def test_training_content(self):\n self.assertIsInstance(self.one_off_training.content, str)\n self.assertEqual(self.one_off_training.content, \"1h d'endurance\")", "def test_email_text():\n new_text = mailroom.compose_email(\"Willie Nelson\", 12.34)\n reference_text = \"\\nDear Willie Nelson,\\n\\\nThank you for your generous gift of $12.34! 
It will help Local Charity\\\n achieve our mission.\\n\\\nBest regards,\\n\\\nLocal Charity\\n\\n\"\n assert new_text == reference_text", "def test_lessthan(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text_lessthan_noencd), self.text_lessthan_encode)", "def test_gameAddText(self):\n # this is tested graphically, it is UI\n pass", "def test_prep_textarea(self):\n pass", "def test_validate_text_input(self):\n region = \"Bayern\"\n region_false = \"B\"\n a = validate_text_input(region)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_text_input(region_false)\n self.assertTrue(\"Kein gültiger Wert für Region\" or\n \"No valid value for region\" in\n str(context.exception))", "def test_issue859(en_tokenizer, text):\n doc = en_tokenizer(text)\n assert doc.text == text", "def test_get_lyrics_invalid_format(bot):\n assert get_lyrics('asdf', 1) == 'Invalid format!'", "def test_good_transcript(self):\r\n good_sjson = _create_file(content=textwrap.dedent(\"\"\"\\\r\n {\r\n \"start\": [\r\n 270,\r\n 2720\r\n ],\r\n \"end\": [\r\n 2720,\r\n 5430\r\n ],\r\n \"text\": [\r\n \"Hi, welcome to Edx.\",\r\n \"Let&#39;s start with what is on your screen right now.\"\r\n ]\r\n }\r\n \"\"\"))\r\n\r\n _upload_sjson_file(good_sjson, self.item.location)\r\n self.item.sub = _get_subs_id(good_sjson.name)\r\n\r\n text, filename, mime_type = self.item.get_transcript()\r\n\r\n expected_text = textwrap.dedent(\"\"\"\\\r\n 0\r\n 00:00:00,270 --> 00:00:02,720\r\n Hi, welcome to Edx.\r\n\r\n 1\r\n 00:00:02,720 --> 00:00:05,430\r\n Let&#39;s start with what is on your screen right now.\r\n\r\n \"\"\")\r\n\r\n self.assertEqual(text, expected_text)\r\n self.assertEqual(filename[:-4], self.item.sub)\r\n self.assertEqual(mime_type, 'application/x-subrip; charset=utf-8')", "def test_text(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_text')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_text ' \\\n '( value TEXT NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_text VALUES (%s)'\n for i in range(10):\n item = random_string(100000)\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_text'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, unicode)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_text')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_text')\n cursor.execute(query)\n conn.commit()", "def test_first_equal(self):\n self.assertEqual(heaviest_word(\"man i need a taxi up to ubud\"), \"taxi\")", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_read_text(pdf_path):\n pdf_reader = PdfReader(path=pdf_path)\n text = pdf_reader.ocr_text()\n\n # We hard code this comparison to keep track of all changes to this metric\n assert pdf_reader.mean_confidence == 89\n assert pdf_reader.page_confidences == [86, 91]\n\n # Check if we have two pages seperated by pagebreaks\n assert len(text.split('\\f')) == 2\n\n # The same content can be extracted from the pages property\n assert '\\f'.join(pdf_reader.pages) == text\n\n # Content on the first page (important that this is at the beginning)\n assert 'Norwegian 
University of Science and Technology' in text[:50]\n\n # Content on second page (important that this is at the end)\n assert 'two requirements' in text[-50:]\n\n # The double-f in affine is hard for bad OCR algorithms\n assert 'affine' in text", "def test_text_roundtrip():\n for text in (\"\", \"a\", \"Hello, world!\", \"9\" * 1000):\n assert text == String.read(String.to_bytes(text))", "def test_analysis_screen_with_clean_text(client, text_to_analyse):\n path = reverse('text_analysis:analysis')\n response = client.get(path, {'fulltext': text_to_analyse})\n assert response.status_code == 200, 'Should return an `OK` status code'", "def verify(self):\n\n # tekstlig testing om koden fungerer\n text = self.klar_tekst_start + \" ble sendt til mottaker som krypteringen \" + \\\n self.crypto + \".\\nMottaker dekrypterte dette til \" + self.klar_tekst_slutt\n\n return text", "def test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")", "def test_is(self):\n invalid = self.TDTT()\n self.check_invalid_is(invalid)\n\n valid = self.TDTT(when=self.txt_when)\n self.check_valid_is(valid)", "def test_words_with_numbers(self):\n\n test_string = \"1. FC Köln\"\n test_anagram = \"anagram\"\n with pytest.raises(ValueError) as exc_info:\n is_anagram(test_string, test_anagram)\n expected_error_msg = \"should only contain letters!\"\n assert exc_info.match(expected_error_msg)", "def test_analyze_text():\n # Create a lexer instance and analyze a text with some rules\n new_dict = lex._lexer(None, None).analyze_text(\n \"test\", [lex_bases.rule(\"JUMP_LINE\", r\"\\n\"), lex_bases.rule(\"TEST\", r\"test\")]\n )\n\n # Check if the returned values are correct\n assert (\n new_dict[\"token\"] == lex_bases.token(\"TEST\", \"test\")\n and new_dict[\"fit_with_a_rule\"]\n and new_dict[\"rule_that_matched\"] == lex_bases.rule(\"TEST\", r\"test\")\n )", "def test_second_equal(self):\n self.assertEqual(heaviest_word(\"what time are we climbing up to the volcano\"), \"volcano\")", "def test_from_text(self):\n rkeyring = dns.tsigkeyring.from_text(text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def assert_text_present(self, text, msg=None):\r\n e = driver.find_element_by_tag_name('body')\r\n assert text in e.text", "def test_analysis_screen_with_dirty_text(client, dirty_text):\n path = reverse('text_analysis:analysis')\n response = client.get(path, {'fulltext': dirty_text})\n assert response.status_code == 200, 'Should return an `OK` status code'", "def test_character_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[1], 133)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def teststring(self):\n self.assertRaises(palindrome.NotStringError,palindrome.palin, 4)", "def validate(self, text):\n raise NotImplementedError()", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 132)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Four of a Kind Ts')", "def check_correctness(self, expected, got):\n expected_lines = expected.strip().splitlines()\n got_lines = got.strip().splitlines()\n if len(got_lines) != len(expected_lines):\n return False\n else:\n for exp, got in zip(expected_lines, got_lines):\n if self.params['strictwhitespace']:\n if exp.rstrip() != got.rstrip():\n return 
False\n else:\n if exp.strip().split() != got.strip().split():\n return False\n return True", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Two Pairs As and 4s')", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 970)", "def test_string():", "def test_snippet_beginning_nonletter(self):\n message = Message(clean_text=u\"!I already know what this will be!!!!!\")\n self.assertEqual(\n message.snippet,\n 'I already know what...'\n )", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def test_get_survey_as_tex(self):\n generic = str(self.generic)\n should_contain = [\n \"documentclass[11pt]{article}\",\n \"title{My title}\",\n \"Test management footer.\",\n \"Aèbc?\",\n \"Bècd?\",\n \"Cède?\",\n \"\",\n ]\n for text in should_contain:\n self.assertIn(text, generic)\n specific = str(self.specific)\n should_contain = [\n \"documentclass[11pt]{report}\",\n \"title{My title}\",\n \"This is the footer.\",\n \"{Lorem ipsum dolor sit amët\",\n \"adipiscing} elit.'\",\n \"with 'K.' standing for 'Yës'\",\n \"'Nah' standing for 'No' or 'Whatever'\",\n ]\n for text in should_contain:\n self.assertIn(text, specific)", "def test_textCondition(self):\n xp = XPathQuery(\"/foo[text() = 'somecontent']\")\n self.assertEqual(xp.matches(self.e), True)", "def test_clean_description(self):\n text = '!@#$%^&*()_+1234567890-='\n self.assertEqual(sync.clean_description(text),\n '!@#$%^&*()_+1234567890-=')\n\n text = \"Darwin\\u00c2\\u00bfs Bulldog\"\n self.assertEqual(sync.clean_description(text), \"Darwin's Bulldog\")\n\n text = \"\\n\\r\\nSome<BR><br /></BR>Text\"\n self.assertEqual(sync.clean_description(text), \"\\n\\r\\nSome\\n\\nText\")", "def test_text_editor():\n assert chap2.text_editor()", "def expected_rubbish(self):", "def isValidTest(self):\n if not self.hasError():\n return False\n distance = dameraulevenshtein(self.word, self.error) \n if(distance > 1):\n return False\n regex = '.*[^a-zA-Z].*'\n if re.match(regex, self.word) or re.match(regex, self.error):\n return False\n return True", "def test_index(self):\n\n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(\"<h2>Please Write your Text</h2>\", result.data)", "def test_explained_text(self):\n result = self._do_output(o.ExplainedTextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \" * long text\\n\"\n \" * You can ignore this problem with --ignore mock_msg\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def test_sanity(self) -> None:\n if self.report.headlines:\n return\n\n if self.report.document.paragraphs:\n self.add_error(\n \"Rubrikerna i dokumentet är felformaterade eller saknas. \"\n \"Rubrikerna ska vara skrivna i versaler och ha samma \"\n \"typsnitt, stil och storlek som brödtexten. 
\"\n \"Rubriker avslutas med radbrytning.\"\n )\n\n if not self.report.document.paragraphs:\n self.add_error(\"Ditt dokument är antigen tomt eller i fel format.\")", "def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)", "def test_for_correct_updating_one(self):\r\n assert increase_sentence_count_if_we_should('one. two. three.', 3, 'a') == (4, 'one. two. three.a')", "def test_force_text_exception(self):\n class MyString(object):\n def __str__(self):\n return b'\\xc3\\xb6\\xc3\\xa4\\xc3\\xbc'\n\n __unicode__ = __str__\n\n # str(s) raises a TypeError on python 3 if the result is not a text type.\n # python 2 fails when it tries converting from str to unicode (via ASCII).\n exception = TypeError if six.PY3 else UnicodeError\n self.assertRaises(exception, force_text, MyString())", "def test_text_resource(self):\n Repository = CTSCapitainsLocalResolver([\"./tests/testing_data/farsiLit\"])\n text, metadata = Repository.__getText__(\"urn:cts:farsiLit:hafez.divan.perseus-eng1\")\n self.assertEqual(\n len(text.citation), 4,\n \"Object has a citation property of length 4\"\n )\n self.assertEqual(\n text.getTextualNode(Reference(\"1.1.1.1\")).export(output=Mimetypes.PLAINTEXT),\n \"Ho ! Saki, pass around and offer the bowl (of love for God) : ### \",\n \"It should be possible to retrieve text\"\n )", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Royal Flush')", "def verify(self, text):\n\n components = text.split(self.HASHSEP)\n if len(components) != 2:\n print 'verify: cannot parse text [%s]' % text\n return False\n\n body, digest = components\n check = self.digest(body)\n\n if check == digest:\n return True\n else:\n print 'verify: Expected [%s] got [%s] text [%s]' % (\n digest, check, text)\n return False", "def test_still_needing_correction(self):\n errors = self.response.data[\"errors\"]\n errors[0][\"correction\"] = \"asdasd\"\n self.response2 = self.client.post(\n reverse(\"correct\"), {\"id\": 1, \"errors\": errors}, format=\"json\"\n )\n self.assertEqual(\n self.response2.data[\"errors\"][0][\"message\"],\n \"The token was not identified as Hebrew\",\n )", "def test_text_default(self):\n r = Review()\n self.assertEqual(\"\", r.text)", "def test_str_post(self):\n expected_str = self.post.text[:15]\n self.assertEqual(expected_str, str(self.post))", "def test_odd_title():\n test_string = \"And now to something completely different\"\n assert (test_string, \"\") == parse_title(test_string)", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def test_score_text1(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tobj_ut, _ = test.score_text(matches)\n\t\tself.assertEqual(obj_ut, -1)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Straight Flush T high')", "def test_password_is_okay():\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('qqqqqqqq') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\twith 
pytest.raises(Exception):\n\t\tassert password_is_ok('') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\t\"\"\"test that valid passwords work\"\"\"\n\tassert password_is_ok('Q8qqqqqqqq') == True\n\tassert password_is_ok('q8qqqqqqqq') == True\n\tassert password_is_ok('Qqqqqqqqqq') == True\n\tassert password_is_ok('qqqqqqqqqq') == True", "def test_invalid_text_search(aquarius_instance):\n text = \"foo_text\"\n with pytest.raises(ValueError):\n aquarius_instance.text_search(text=text, sort=\"foo_sort\")", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'One Pair 4s')", "def test_textAsEvent(self):\n self.assertEquals(\n textAsEvent(u\"Hello, World!\"),\n b\"data: Hello, World!\\n\\n\"\n )", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Straight High card 5')", "def _test_text(self, url, content, buffering):\n # read(-1), readable(), seekable()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n self.assertTrue(tf.readable())\n self.assertTrue(tf.seekable())\n self.assertEqual(tf.read(), content)\n self.assertEqual(tf.read(), \"\")\n\n # read(10)\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n chunk = tf.read(10)\n result += chunk\n if len(chunk) < 10:\n break\n self.assertEqual(result, content)\n\n # readline(), seek(), tell()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n rpos = tf.tell()\n tf.seek(0)\n tf.seek(rpos)\n chunk = tf.readline()\n result += chunk\n if len(chunk) == 0:\n break\n self.assertEqual(result, content)", "def test_kyc_post_legal(self):\n pass", "def test_valid(self):\n template = '{0} just right {1}'\n value_count = 2\n try:\n validate_str_substitution(template, value_count)\n except ValidationError:\n self.fail('Name raised ValidationError unexpectedly')", "def test_unusual_misc():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n sentences = \"{:C}\".format(doc).split(\"\\n\\n\")\n assert len(sentences) == 2\n sentence = sentences[0].split(\"\\n\")\n assert len(sentence) == 14\n\n for word in sentence:\n pieces = word.split(\"\\t\")\n assert len(pieces) == 1 or len(pieces) == 10\n if len(pieces) == 10:\n assert all(piece for piece in pieces)", "def test_empty_string(self):\n string = \"\"\n expected = \"\"\n self.assertEqual(transliterate(string), expected)", "def test_textConditionUnicode(self):\n xp = XPathQuery(u\"//*[text()='\\N{SNOWMAN}']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.quux])", "def test_from_alt_text(self):\n rkeyring = dns.tsigkeyring.from_text(alt_text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_text_multiline(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(text=\"abc\\ndef\")), \":warning: **abc**\\ndef\")", "def test_text(self):\n args = [\"hello world\"]\n namespace = self.parser.parse_args(args)\n self.assertEqual(namespace.text, \"hello world\")", "def test_no_errors(self):\n test_error = \"\\r\\n--------------------------------------------------------------------\\r\\n\"\\\n \"Your code has been rated at 10.00/10 (previous run: 9.33/10, +0.67)\"\n\n self.assertEqual(\n format_errors(test_error),\n None\n )", "def testRandomWord(self):\n word1 = 
self.searcher.randomWord()\n word2 = self.searcher.randomWord()\n self.assertTrue(len(word1) > 1, 'Word length too short')\n self.assertTrue(len(word2) > 1, 'Word length too short')\n self.assertNotEqual(word1, word2, 'Found the same word')", "def test_with_single_space(self):\n self.assertEqual(escapespaces('Hi there'),\n 'Hi there')", "def test_art_from_taste_space(self):", "def test_invalid_tokens(self):\n self.assertTrue(1 + 1)", "def test_str_title(self):\n post = self.post\n string = post.__str__()\n expected_string = post.text[:15]\n self.assertEqual(string, expected_string)" ]
[ "0.7778528", "0.717947", "0.71633434", "0.6949054", "0.6913475", "0.68788403", "0.6877094", "0.68400156", "0.68368715", "0.6818046", "0.6760472", "0.6753992", "0.6737595", "0.6676952", "0.6637825", "0.6612554", "0.6598669", "0.658587", "0.65414304", "0.65287834", "0.652236", "0.650319", "0.65024793", "0.6494483", "0.6466075", "0.6449126", "0.6429379", "0.6410989", "0.6404181", "0.63916576", "0.6384405", "0.6381306", "0.6381291", "0.63669235", "0.6361942", "0.63497674", "0.6342666", "0.6323378", "0.6315516", "0.6315253", "0.6312008", "0.6310804", "0.62897694", "0.62873125", "0.62678206", "0.625345", "0.62427485", "0.6236916", "0.62249154", "0.622433", "0.62196076", "0.61969775", "0.61865366", "0.6184814", "0.61811274", "0.61773455", "0.6172558", "0.61713064", "0.61557645", "0.61455977", "0.614515", "0.61435544", "0.61406606", "0.61332196", "0.6130442", "0.61269885", "0.61228764", "0.6122304", "0.6113841", "0.61086464", "0.6106468", "0.6092485", "0.60826665", "0.6082473", "0.6081128", "0.60790575", "0.6076257", "0.60712916", "0.6065985", "0.6049318", "0.60422206", "0.60388625", "0.6036061", "0.60341465", "0.6032158", "0.6026345", "0.6023328", "0.60220623", "0.6013864", "0.60077715", "0.6007719", "0.60062104", "0.60033506", "0.5998585", "0.59972274", "0.5989087", "0.598861", "0.5978529", "0.597617", "0.597333", "0.59653366" ]
0.0
-1
Test that the text is correct.
def test_unknown_text(self): metric1 = dict( type="metric_type", name="Metric", unit="units", scale="count", recent_measurements=[ dict(count=dict(value=0, status="near_target_met")), dict(count=dict(value=None, status="unknown")), ], ) metric_notification_data1 = MetricNotificationData(metric1, self.data_model, "status_changed") notification = Notification(self.report, [metric_notification_data1], "destination_uuid", {}) text = build_notification_text(notification) self.assertEqual( "[Report 1](https://report1) has 1 metric that is notable:\n\n" "* Metric status is white (unknown), was yellow (near target met). Value is ? units, was 0 units.\n", text, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_text(self, text):\n pass", "def assert_text(self,actual, expected):\n actual_stripped = actual.rstrip('/')\n assert expected == actual_stripped, \"The text does not match:\\n\\tExpected : {0} \\n\\tActual : {1}\\n\".format(expected, actual_stripped)", "def test_text(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text), self.text)", "def verify_text(self, expected_text: str, *locator):\n actual_text = self.driver.find_element(*locator).text\n assert actual_text == expected_text, f'Error. Expected {expected_text} does not match actual {actual_text}'", "def test_plain_text():\n source_file = os.path.join(_TESTS_DIR, 'plain_text_sample.txt')\n file_reports = psca.analyze([source_file], _SETTINGS_FILE,\n profile='test_04')\n reports = file_reports[0].reports\n # The elements of the found_errors and expected_errors sets are\n # tuples with two elements: (<rule code>, <line>)\n found_errors = {(r.rule_code, r.line) for r in reports}\n expected_errors = {\n (4, 7),\n (5, None),\n (6, 3),\n }\n undetected_errors = expected_errors - found_errors\n assert len(undetected_errors) == 0, \\\n f'Undetected errors: {undetected_errors}'\n unexpected_errors = found_errors - expected_errors\n assert len(unexpected_errors) == 0, \\\n f'Unexpected errors: {unexpected_errors}'", "def test_text_field():", "def assertText(self,content,expected_text,description=\"\"): \n self.assertTrue(expected_text in content,\n \"expected to find '{0}' but found '{1}' instead.\\\n Attemted action: {2}\".format(expected_text, \n content,\n description))", "def verify_text_present(self, text, msg=None):\r\n try:\r\n self.assert_text_present(text, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def testText(self):\n lc = self.CreateConsole()\n contents = \"\"\n self.assertEqual(contents, lc.GetText())\n for str in ('a', 'foo', '\\n\\n\\n', 'bar\\nbaz\\n choke choke zapf'):\n contents += str\n lc.AppendText(str)\n self.assertEqual(contents, lc.GetText())", "def verify_text(self, expected_text: str, *locator):\n e = self.driver.find_element(*locator)\n actual_text = e.text\n assert expected_text == actual_text, f\"Expected {expected_text} does not match actual {actual_text}\"", "def assert_text(self, path, contents):\n assert isinstance(contents, text_type)\n data = self.fs.gettext(path)\n self.assertEqual(data, contents)\n self.assertIsInstance(data, text_type)", "def test_textlines_field():", "def test_text(self):\n result = self._do_output(o.TextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def testIsText(self):\n parser = text_parser.PyparsingSingleLineTextParser()\n\n bytes_in = b'this is My Weird ASCII and non whatever string.'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = 'Plaso Síar Og Raðar Þessu'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'\\x01\\\\62LSO\\xFF'\n self.assertFalse(parser._IsText(bytes_in))\n\n bytes_in = b'T\\x00h\\x00i\\x00s\\x00\\x20\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii\\x00'\n self.assertTrue(parser._IsText(bytes_in))\n\n bytes_in = b'Ascii Open then...\\x00\\x99\\x23'\n self.assertFalse(parser._IsText(bytes_in))", "def 
test_initialization_of_homework_text():\n assert oop_hw.text == \"Learn OOP\"", "def test_text_is_correct(app):\n rv = app.test_client().post('/tokenize', \n json={\n 'text': \"I still haven't found what i'm looking for\",\n 'lang': 'en'\n })\n json_data = rv.get_json()\n tokens = json_data['tokens']\n assert tokens == ['I', 'still', 'have', 'not', 'found', 'what', 'i', 'am', 'looking', 'for']", "def test_third_equal(self):\n self.assertEqual(heaviest_word(\"take me to semynak\"), \"semynak\")", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False", "def test_valid_text_str(self):\n f = lws.valid_text\n assert f('string', r'[a-z]*') is True\n assert f('string', r'string') is True\n assert f('string', r'[0-9]*') is False\n assert f('', r'.*') is False\n assert f('abcde', lambda x: 'e' in x) is True\n assert f('abcde', lambda x: 'f' in x) is False", "def test_same_sentence_check(self):\n block = get_text(SAMPLE_SENTENCE)\n self.assertTrue(same_sentence_check(block, 0, 98))\n self.assertFalse(same_sentence_check(block, 166, 168))", "def assertTextEq(self, a, b, msg=None):\n trans = str.maketrans({\n \" \": \"\",\n \"\\n\": \"\"\n })\n\n if isinstance(a, iAntlr4GramElem):\n a = a.toAntlr4()\n\n if isinstance(b, iAntlr4GramElem):\n b = b.toAntlr4()\n\n a = a.translate(trans)\n b = b.translate(trans)\n \n self.assertEqual(a, b, msg=msg)", "def test_training_content(self):\n self.assertIsInstance(self.one_off_training.content, str)\n self.assertEqual(self.one_off_training.content, \"1h d'endurance\")", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def test_email_text():\n new_text = mailroom.compose_email(\"Willie Nelson\", 12.34)\n reference_text = \"\\nDear Willie Nelson,\\n\\\nThank you for your generous gift of $12.34! 
It will help Local Charity\\\n achieve our mission.\\n\\\nBest regards,\\n\\\nLocal Charity\\n\\n\"\n assert new_text == reference_text", "def test_lessthan(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text_lessthan_noencd), self.text_lessthan_encode)", "def test_gameAddText(self):\n # this is tested graphically, it is UI\n pass", "def test_prep_textarea(self):\n pass", "def test_validate_text_input(self):\n region = \"Bayern\"\n region_false = \"B\"\n a = validate_text_input(region)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_text_input(region_false)\n self.assertTrue(\"Kein gültiger Wert für Region\" or\n \"No valid value for region\" in\n str(context.exception))", "def test_issue859(en_tokenizer, text):\n doc = en_tokenizer(text)\n assert doc.text == text", "def test_get_lyrics_invalid_format(bot):\n assert get_lyrics('asdf', 1) == 'Invalid format!'", "def test_good_transcript(self):\r\n good_sjson = _create_file(content=textwrap.dedent(\"\"\"\\\r\n {\r\n \"start\": [\r\n 270,\r\n 2720\r\n ],\r\n \"end\": [\r\n 2720,\r\n 5430\r\n ],\r\n \"text\": [\r\n \"Hi, welcome to Edx.\",\r\n \"Let&#39;s start with what is on your screen right now.\"\r\n ]\r\n }\r\n \"\"\"))\r\n\r\n _upload_sjson_file(good_sjson, self.item.location)\r\n self.item.sub = _get_subs_id(good_sjson.name)\r\n\r\n text, filename, mime_type = self.item.get_transcript()\r\n\r\n expected_text = textwrap.dedent(\"\"\"\\\r\n 0\r\n 00:00:00,270 --> 00:00:02,720\r\n Hi, welcome to Edx.\r\n\r\n 1\r\n 00:00:02,720 --> 00:00:05,430\r\n Let&#39;s start with what is on your screen right now.\r\n\r\n \"\"\")\r\n\r\n self.assertEqual(text, expected_text)\r\n self.assertEqual(filename[:-4], self.item.sub)\r\n self.assertEqual(mime_type, 'application/x-subrip; charset=utf-8')", "def test_first_equal(self):\n self.assertEqual(heaviest_word(\"man i need a taxi up to ubud\"), \"taxi\")", "def test_text(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_text')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_text ' \\\n '( value TEXT NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_text VALUES (%s)'\n for i in range(10):\n item = random_string(100000)\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_text'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, unicode)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_text')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_text')\n cursor.execute(query)\n conn.commit()", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Three of a Kind As')", "def test_read_text(pdf_path):\n pdf_reader = PdfReader(path=pdf_path)\n text = pdf_reader.ocr_text()\n\n # We hard code this comparison to keep track of all changes to this metric\n assert pdf_reader.mean_confidence == 89\n assert pdf_reader.page_confidences == [86, 91]\n\n # Check if we have two pages seperated by pagebreaks\n assert len(text.split('\\f')) == 2\n\n # The same content can be extracted from the pages property\n assert '\\f'.join(pdf_reader.pages) == text\n\n # Content on the first page (important that this is at the beginning)\n assert 'Norwegian 
University of Science and Technology' in text[:50]\n\n # Content on second page (important that this is at the end)\n assert 'two requirements' in text[-50:]\n\n # The double-f in affine is hard for bad OCR algorithms\n assert 'affine' in text", "def test_text_roundtrip():\n for text in (\"\", \"a\", \"Hello, world!\", \"9\" * 1000):\n assert text == String.read(String.to_bytes(text))", "def test_analysis_screen_with_clean_text(client, text_to_analyse):\n path = reverse('text_analysis:analysis')\n response = client.get(path, {'fulltext': text_to_analyse})\n assert response.status_code == 200, 'Should return an `OK` status code'", "def verify(self):\n\n # tekstlig testing om koden fungerer\n text = self.klar_tekst_start + \" ble sendt til mottaker som krypteringen \" + \\\n self.crypto + \".\\nMottaker dekrypterte dette til \" + self.klar_tekst_slutt\n\n return text", "def test_str(self):\n self.assertEqual(str(self.content), \"Test Content\")", "def test_is(self):\n invalid = self.TDTT()\n self.check_invalid_is(invalid)\n\n valid = self.TDTT(when=self.txt_when)\n self.check_valid_is(valid)", "def test_words_with_numbers(self):\n\n test_string = \"1. FC Köln\"\n test_anagram = \"anagram\"\n with pytest.raises(ValueError) as exc_info:\n is_anagram(test_string, test_anagram)\n expected_error_msg = \"should only contain letters!\"\n assert exc_info.match(expected_error_msg)", "def test_analyze_text():\n # Create a lexer instance and analyze a text with some rules\n new_dict = lex._lexer(None, None).analyze_text(\n \"test\", [lex_bases.rule(\"JUMP_LINE\", r\"\\n\"), lex_bases.rule(\"TEST\", r\"test\")]\n )\n\n # Check if the returned values are correct\n assert (\n new_dict[\"token\"] == lex_bases.token(\"TEST\", \"test\")\n and new_dict[\"fit_with_a_rule\"]\n and new_dict[\"rule_that_matched\"] == lex_bases.rule(\"TEST\", r\"test\")\n )", "def test_second_equal(self):\n self.assertEqual(heaviest_word(\"what time are we climbing up to the volcano\"), \"volcano\")", "def test_from_text(self):\n rkeyring = dns.tsigkeyring.from_text(text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def assert_text_present(self, text, msg=None):\r\n e = driver.find_element_by_tag_name('body')\r\n assert text in e.text", "def test_analysis_screen_with_dirty_text(client, dirty_text):\n path = reverse('text_analysis:analysis')\n response = client.get(path, {'fulltext': dirty_text})\n assert response.status_code == 200, 'Should return an `OK` status code'", "def test_character_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[1], 133)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def teststring(self):\n self.assertRaises(palindrome.NotStringError,palindrome.palin, 4)", "def validate(self, text):\n raise NotImplementedError()", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 132)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Four of a Kind Ts')", "def check_correctness(self, expected, got):\n expected_lines = expected.strip().splitlines()\n got_lines = got.strip().splitlines()\n if len(got_lines) != len(expected_lines):\n return False\n else:\n for exp, got in zip(expected_lines, got_lines):\n if self.params['strictwhitespace']:\n if exp.rstrip() != got.rstrip():\n return 
False\n else:\n if exp.strip().split() != got.strip().split():\n return False\n return True", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Two Pairs As and 4s')", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 970)", "def test_string():", "def test_snippet_beginning_nonletter(self):\n message = Message(clean_text=u\"!I already know what this will be!!!!!\")\n self.assertEqual(\n message.snippet,\n 'I already know what...'\n )", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def test_get_survey_as_tex(self):\n generic = str(self.generic)\n should_contain = [\n \"documentclass[11pt]{article}\",\n \"title{My title}\",\n \"Test management footer.\",\n \"Aèbc?\",\n \"Bècd?\",\n \"Cède?\",\n \"\",\n ]\n for text in should_contain:\n self.assertIn(text, generic)\n specific = str(self.specific)\n should_contain = [\n \"documentclass[11pt]{report}\",\n \"title{My title}\",\n \"This is the footer.\",\n \"{Lorem ipsum dolor sit amët\",\n \"adipiscing} elit.'\",\n \"with 'K.' standing for 'Yës'\",\n \"'Nah' standing for 'No' or 'Whatever'\",\n ]\n for text in should_contain:\n self.assertIn(text, specific)", "def test_clean_description(self):\n text = '!@#$%^&*()_+1234567890-='\n self.assertEqual(sync.clean_description(text),\n '!@#$%^&*()_+1234567890-=')\n\n text = \"Darwin\\u00c2\\u00bfs Bulldog\"\n self.assertEqual(sync.clean_description(text), \"Darwin's Bulldog\")\n\n text = \"\\n\\r\\nSome<BR><br /></BR>Text\"\n self.assertEqual(sync.clean_description(text), \"\\n\\r\\nSome\\n\\nText\")", "def test_textCondition(self):\n xp = XPathQuery(\"/foo[text() = 'somecontent']\")\n self.assertEqual(xp.matches(self.e), True)", "def test_text_editor():\n assert chap2.text_editor()", "def expected_rubbish(self):", "def isValidTest(self):\n if not self.hasError():\n return False\n distance = dameraulevenshtein(self.word, self.error) \n if(distance > 1):\n return False\n regex = '.*[^a-zA-Z].*'\n if re.match(regex, self.word) or re.match(regex, self.error):\n return False\n return True", "def test_index(self):\n\n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(\"<h2>Please Write your Text</h2>\", result.data)", "def test_explained_text(self):\n result = self._do_output(o.ExplainedTextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \" * long text\\n\"\n \" * You can ignore this problem with --ignore mock_msg\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def test_sanity(self) -> None:\n if self.report.headlines:\n return\n\n if self.report.document.paragraphs:\n self.add_error(\n \"Rubrikerna i dokumentet är felformaterade eller saknas. \"\n \"Rubrikerna ska vara skrivna i versaler och ha samma \"\n \"typsnitt, stil och storlek som brödtexten. 
\"\n \"Rubriker avslutas med radbrytning.\"\n )\n\n if not self.report.document.paragraphs:\n self.add_error(\"Ditt dokument är antigen tomt eller i fel format.\")", "def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)", "def test_for_correct_updating_one(self):\r\n assert increase_sentence_count_if_we_should('one. two. three.', 3, 'a') == (4, 'one. two. three.a')", "def test_force_text_exception(self):\n class MyString(object):\n def __str__(self):\n return b'\\xc3\\xb6\\xc3\\xa4\\xc3\\xbc'\n\n __unicode__ = __str__\n\n # str(s) raises a TypeError on python 3 if the result is not a text type.\n # python 2 fails when it tries converting from str to unicode (via ASCII).\n exception = TypeError if six.PY3 else UnicodeError\n self.assertRaises(exception, force_text, MyString())", "def test_text_resource(self):\n Repository = CTSCapitainsLocalResolver([\"./tests/testing_data/farsiLit\"])\n text, metadata = Repository.__getText__(\"urn:cts:farsiLit:hafez.divan.perseus-eng1\")\n self.assertEqual(\n len(text.citation), 4,\n \"Object has a citation property of length 4\"\n )\n self.assertEqual(\n text.getTextualNode(Reference(\"1.1.1.1\")).export(output=Mimetypes.PLAINTEXT),\n \"Ho ! Saki, pass around and offer the bowl (of love for God) : ### \",\n \"It should be possible to retrieve text\"\n )", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Royal Flush')", "def test_still_needing_correction(self):\n errors = self.response.data[\"errors\"]\n errors[0][\"correction\"] = \"asdasd\"\n self.response2 = self.client.post(\n reverse(\"correct\"), {\"id\": 1, \"errors\": errors}, format=\"json\"\n )\n self.assertEqual(\n self.response2.data[\"errors\"][0][\"message\"],\n \"The token was not identified as Hebrew\",\n )", "def verify(self, text):\n\n components = text.split(self.HASHSEP)\n if len(components) != 2:\n print 'verify: cannot parse text [%s]' % text\n return False\n\n body, digest = components\n check = self.digest(body)\n\n if check == digest:\n return True\n else:\n print 'verify: Expected [%s] got [%s] text [%s]' % (\n digest, check, text)\n return False", "def test_text_default(self):\n r = Review()\n self.assertEqual(\"\", r.text)", "def test_str_post(self):\n expected_str = self.post.text[:15]\n self.assertEqual(expected_str, str(self.post))", "def test_odd_title():\n test_string = \"And now to something completely different\"\n assert (test_string, \"\") == parse_title(test_string)", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def test_score_text1(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tobj_ut, _ = test.score_text(matches)\n\t\tself.assertEqual(obj_ut, -1)", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Straight Flush T high')", "def test_password_is_okay():\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('qqqqqqqq') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\twith 
pytest.raises(Exception):\n\t\tassert password_is_ok('') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\t\"\"\"test that valid passwords work\"\"\"\n\tassert password_is_ok('Q8qqqqqqqq') == True\n\tassert password_is_ok('q8qqqqqqqq') == True\n\tassert password_is_ok('Qqqqqqqqqq') == True\n\tassert password_is_ok('qqqqqqqqqq') == True", "def test_invalid_text_search(aquarius_instance):\n text = \"foo_text\"\n with pytest.raises(ValueError):\n aquarius_instance.text_search(text=text, sort=\"foo_sort\")", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'One Pair 4s')", "def test_is_hand_string_correct(self):\n self.assertEqual(self.hand.getPostCurrentHandString(), 'Straight High card 5')", "def test_textAsEvent(self):\n self.assertEquals(\n textAsEvent(u\"Hello, World!\"),\n b\"data: Hello, World!\\n\\n\"\n )", "def _test_text(self, url, content, buffering):\n # read(-1), readable(), seekable()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n self.assertTrue(tf.readable())\n self.assertTrue(tf.seekable())\n self.assertEqual(tf.read(), content)\n self.assertEqual(tf.read(), \"\")\n\n # read(10)\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n chunk = tf.read(10)\n result += chunk\n if len(chunk) < 10:\n break\n self.assertEqual(result, content)\n\n # readline(), seek(), tell()\n with wfdb.io._url.openurl(url, \"r\", buffering=buffering) as tf:\n result = \"\"\n while True:\n rpos = tf.tell()\n tf.seek(0)\n tf.seek(rpos)\n chunk = tf.readline()\n result += chunk\n if len(chunk) == 0:\n break\n self.assertEqual(result, content)", "def test_kyc_post_legal(self):\n pass", "def test_valid(self):\n template = '{0} just right {1}'\n value_count = 2\n try:\n validate_str_substitution(template, value_count)\n except ValidationError:\n self.fail('Name raised ValidationError unexpectedly')", "def test_unusual_misc():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n sentences = \"{:C}\".format(doc).split(\"\\n\\n\")\n assert len(sentences) == 2\n sentence = sentences[0].split(\"\\n\")\n assert len(sentence) == 14\n\n for word in sentence:\n pieces = word.split(\"\\t\")\n assert len(pieces) == 1 or len(pieces) == 10\n if len(pieces) == 10:\n assert all(piece for piece in pieces)", "def test_empty_string(self):\n string = \"\"\n expected = \"\"\n self.assertEqual(transliterate(string), expected)", "def test_from_alt_text(self):\n rkeyring = dns.tsigkeyring.from_text(alt_text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def test_textConditionUnicode(self):\n xp = XPathQuery(u\"//*[text()='\\N{SNOWMAN}']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.quux])", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_text_multiline(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(text=\"abc\\ndef\")), \":warning: **abc**\\ndef\")", "def test_text(self):\n args = [\"hello world\"]\n namespace = self.parser.parse_args(args)\n self.assertEqual(namespace.text, \"hello world\")", "def test_no_errors(self):\n test_error = \"\\r\\n--------------------------------------------------------------------\\r\\n\"\\\n \"Your code has been rated at 10.00/10 (previous run: 9.33/10, +0.67)\"\n\n self.assertEqual(\n format_errors(test_error),\n None\n )", "def testRandomWord(self):\n word1 = 
self.searcher.randomWord()\n word2 = self.searcher.randomWord()\n self.assertTrue(len(word1) > 1, 'Word length too short')\n self.assertTrue(len(word2) > 1, 'Word length too short')\n self.assertNotEqual(word1, word2, 'Found the same word')", "def test_with_single_space(self):\n self.assertEqual(escapespaces('Hi there'),\n 'Hi there')", "def test_art_from_taste_space(self):", "def test_invalid_tokens(self):\n self.assertTrue(1 + 1)", "def test_str_title(self):\n post = self.post\n string = post.__str__()\n expected_string = post.text[:15]\n self.assertEqual(string, expected_string)" ]
[ "0.7778226", "0.7179298", "0.71623003", "0.69488645", "0.6911859", "0.6878704", "0.6875989", "0.6840048", "0.68342376", "0.681757", "0.6759105", "0.6752436", "0.6735727", "0.66753113", "0.6636938", "0.6611503", "0.6597898", "0.65839463", "0.65408915", "0.6528578", "0.65214723", "0.6503039", "0.650238", "0.64941794", "0.64658", "0.64483726", "0.642917", "0.641142", "0.6401639", "0.63913816", "0.63840526", "0.63804364", "0.6380374", "0.6367654", "0.6360041", "0.63486904", "0.63410753", "0.6323593", "0.63162583", "0.6315621", "0.6312187", "0.63090557", "0.6289032", "0.62873495", "0.6267469", "0.6252967", "0.6241526", "0.6236256", "0.6226406", "0.62238294", "0.62185156", "0.6197864", "0.6187136", "0.6185654", "0.6180014", "0.6176662", "0.617311", "0.6171105", "0.61539674", "0.61449933", "0.61435235", "0.61422896", "0.61400276", "0.61341894", "0.61309606", "0.6124739", "0.6123118", "0.61229706", "0.61127913", "0.61076903", "0.6104935", "0.6093331", "0.60851496", "0.6083098", "0.60810554", "0.60790795", "0.60756", "0.6072323", "0.60637087", "0.6049839", "0.6043598", "0.6037503", "0.60370296", "0.6033113", "0.6033073", "0.6025906", "0.602371", "0.6023661", "0.6012263", "0.6007594", "0.6006228", "0.60060334", "0.600416", "0.5996581", "0.59948915", "0.5990606", "0.5988796", "0.5977437", "0.5975151", "0.59737885", "0.59651613" ]
0.0
-1
./fasttext skipgram -input dumps.txt -output model -dim 256 -minCount 1
def preexe(): from collections import OrderedDict as odict term_index = odict() term_vec = pickle.loads(open('term_vec.pkl', 'rb').read()) with open('./dumps.txt', 'r') as f: datasets = [] for fi, line in enumerate(f): if fi > 50000: break if fi%500 == 0: print("now iter {}".format(fi)) terms = line.strip().split() for slide in range(0, len(terms) - 4, 1 ): ans = terms[slide+4] buff = [] try: [buff.append(term_vec[term]) for term in terms[slide: slide+4]] except KeyError as e: continue datasets.append( (buff, ans, terms[slide: slide+5]) ) if term_index.get(ans) is None: term_index[ans] = len(term_index) open('datasets.pkl', 'wb').write(pickle.dumps(datasets)) open('term_index.pkl', 'wb').write(pickle.dumps(term_index))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_fasttext_skipgram(self, corpus_path,\n output_path,\n **kwargs):\n print(\"Training Fasttext model using Skipgram method\")\n self.fasttext_model = fasttext.train_unsupervised(corpus_path, model='skipgram', **kwargs)\n self.fasttext_model.save_model(path=output_path)\n print(\"Model saved!\")", "def skipgram(init,\n load,\n sg_model_path,\n sg_model_name,\n save_kv,\n sg_model_config,\n train,\n epochs,\n similarity,\n accuracy):\n\n # allows display info\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n # define some path variable to clean the code\n path_to_model_dir = os.path.join(sg_model_path, sg_model_name)\n path_to_model_file = os.path.join(path_to_model_dir, sg_model_name + \".model\")\n path_to_keyed_vectors_file = os.path.join(path_to_model_dir, sg_model_name + \".kv\")\n\n # use a memory-friendly iterator\n sentences = MyReviews(nb_reviews=NB_REVIEWS)\n\n if init and not load:\n # sentences / corpus = None so the model is left uninitialized\n # iter = 1 to make sure to have an uninitialized model\n # sample = The threshold for configuring which higher-frequency words are randomly downsampled, useful range is (0, 1e-5).\n model = Word2Vec(sentences=sentences,\n sg=1,\n iter=1,\n size=sg_model_config[\"size\"],\n window=sg_model_config[\"window\"],\n sample=sg_model_config[\"sample\"],\n min_count=sg_model_config[\"min_count\"],\n hs=sg_model_config[\"hs\"],\n negative=sg_model_config[\"negative\"],\n workers=sg_model_config[\"workers\"])\n\n # save the model after initialization\n model.save(path_to_model_file)\n\n elif load:\n # load the model\n model = Word2Vec.load(path_to_model_file)\n\n else:\n # the user is informed that he has to choise init or load arguments\n raise RuntimeError(\"You have either to choose parameter -init or -load\")\n\n if train:\n # train the model\n model.train(sentences=sentences,\n total_examples=model.corpus_count,\n epochs=epochs)\n\n # always save the model after training\n model.save(path_to_model_file)\n\n if save_kv:\n # save vectors representation of words\n model.wv.save(path_to_keyed_vectors_file)\n\n if similarity != \"\":\n # evaluate the model by similarity search for one word\n print(\"Words similar to \", similarity)\n print(model.most_similar(positive=[similarity]))\n\n if accuracy:\n model.wv.accuracy(questions=PATH_TO_QUESTIONS_WORDS_FILE)", "def fasttext_model(sentences, size=100, min_count=5, window=5, negative=5,\r\n cbow=True, iterations=5, seed=0, workers=1):\r\n return FastText(sentences, size=size, min_count=min_count, window=window,\r\n negative=negative, sg=not cbow, iter=iterations,\r\n seed=seed, workers=workers)", "def train_fasttext_model(infile_name, outfile_name=None, dim=100, ws=4, min_count=3, n_jobs=1,\n minn=1, maxn=2, method='cbow', epoch=30):\n\n if method.lower() == 'skip-gram':\n sg = 1\n elif method.lower() == 'cbow':\n sg = 0\n else:\n raise ValueError('skip-gram or cbow are only valid options')\n\n start = timeit.default_timer()\n model = fasttext.FastText(sg=sg, size=dim, window=ws,\n min_count=min_count, min_n=minn, max_n=maxn, workers=n_jobs)\n # model = word2vec.Word2Vec(corpus, size=vector_size, window=window, min_count=min_count, workers=n_jobs, sg=sg,\n # **kwargs)\n # corpus = word2vec.LineSentence(infile_name)\n print('>>> Start to read molecular sentences...')\n model.build_vocab(corpus_file=infile_name)\n print('Count of molecular sentences: {}, count of unique fragment: {}'.format(model.corpus_count, len(model.wv.vocab)))\n print('>>> 
Start to training model...')\n abc = model.train(corpus_file=infile_name, total_examples=model.corpus_count,\n epochs=epoch, total_words=len(model.wv.vocab))\n try:\n print('return values of model training: {}'.format(abc))\n except:\n pass\n if outfile_name:\n # fname = get_tmpfile(\"fasttext.model\")\n model.save(outfile_name)\n\n stop = timeit.default_timer()\n print('Runtime: ', round((stop - start) / 60, 2), ' minutes')\n return model", "def fasttext_model(\n sentences, size=100, min_count=5, negative=5, window=5,\n cbow=True, iterations=5, seed=0, workers=1):\n cbow = 0 if cbow == 1 else 1\n model = FastText(\n sentences, size=size, min_count=min_count,\n negative=negative, window=window, sg=cbow, iter=iterations,\n seed=seed, workers=workers)\n\n return model", "def skipgram(line, vocab, ws):\n\tinputs = []; labels = []\n\tword_list = line.split()\n\tfor i in range( len(word_list) ):\n\t\tinput_word = word_list[i]\n\t\tleft = word_list[max(i-ws,0) : i]\n\t\tright = word_list[i+1 : i+1+ws]\n\t\tfor context_word in left+right:\n\t\t\tif (input_word in vocab) and (context_word in vocab):\n\t\t\t\tinputs.append( vocab.index(input_word) )\n\t\t\t\tlabels.append( vocab.index(context_word) )\n\tlabels = numpy.array([labels]).T # store in a column matrix\t\n\treturn inputs, labels", "def train():\n k = len(accepted_chars)\n enc = \"UTF-8\"\n # Assume we have seen 10 of each character pair. This acts as a kind of\n # prior or smoothing factor. This way, if we see a character transition\n # live that we've never observed in the past, we won't assume the entire\n # string has 0 probability.\n counts = [[10 for i in xrange(k)] for i in xrange(k)]\n \n bigrams = filter_chars(accepted_chars, ngrams(2, counter(counts)))\n for c in open('big.txt').read().decode(enc): bigrams.send(c)\n \n # Normalize the counts so that they become log probabilities. 
\n # We use log probabilities rather than straight probabilities to avoid\n # numeric underflow issues with long texts.\n # This contains a justification:\n # http://squarecog.wordpress.com/2009/01/10/dealing-with-underflow-in-joint-probability-calculations/\n for row in counts:\n s = float(sum(row))\n for j in xrange(len(row)):\n row[j] = math.log(row[j] / s)\n\n # Find the probability of generating a few arbitrarily choosen good and\n # bad phrases.\n good_probs = [avg_transition_prob(line, counts) \\\n for line in open('good.txt').read().decode(enc).split('\\n') if line]\n bad_probs = [avg_transition_prob(line, counts) \\\n for line in open('bad.txt').read().decode(enc).split('\\n') if line]\n # Assert that we actually are capable of detecting the junk.\n assert min(good_probs) > max(bad_probs)\n\n # And pick a threshold halfway between the worst good and best bad inputs.\n thresh = (min(good_probs) + max(bad_probs)) / 2\n pickle.dump({'mat': counts, 'thresh': thresh}, open('gib_model.pki', 'wb'))", "def buildSkipgram(voc, maxlen=50, step=3):\n \n text, sym_indices, _ = voc\n sentences = []\n y = []\n syms = set(text) # unique symbols (chars or words)\n\n # build correct sequences of words in context\n for i in range(maxlen, len(text) - maxlen, step):\n context = text[i-maxlen/2: i+maxlen/2]\n sentences.append(context)\n y.append(1)\n\n # build out of context sequences\n for i in range(maxlen, len(text) - maxlen, step):\n random_idx = np.random.random_integers(1, len(text)-1, maxlen)\n out_of_context = [text[x] for x in random_idx]\n sentences.append(out_of_context)\n y.append(0)\n\n print('nb sequences:', len(sentences))\n \n X = np.zeros((len(sentences), maxlen), dtype=np.int)\n\n for i, sentence in enumerate(sentences):\n for j, sym in enumerate(sentence):\n X[i,j] = sym_indices[sym] \n \n y = np.asarray(y)\n\n # shuffle and return\n idx = np.random.permutation(X.shape[0])\n X = X[idx,:]\n y = y[idx]\n\n return (X,y)", "def train():\n counts = {size: dict() for size in NGRAM_SIZES}\n for word in tqdm.tqdm(word_iterator(\"resources/datasets\")):\n if word == \"\":\n continue\n for size in NGRAM_SIZES:\n for token in ngrams(word, 2 * size):\n left, right = token[:size], token[size:]\n counts[size].setdefault(left, dict())\n counts[size][left].setdefault(right, 0)\n counts[size][left][right] += 1\n model = {size: dict() for size in NGRAM_SIZES}\n for size in NGRAM_SIZES:\n for left in counts[size]:\n total = sum(counts[size][left].values())\n model[size][left] = dict()\n for right in counts[size][left]:\n model[size][left][right] = math.log(\n counts[size][left][right] / total)\n with open(MODEL_FILENAME, \"wb\") as file:\n pickle.dump(model, file)", "def tf_train_seq_data_proc_nobatch(num_txt, batch_size, max_seq):\r\n x_and_y = tf.data.Dataset.from_tensor_slices(num_txt).apply(tf.contrib.data.batch_and_drop_remainder(max_seq + 1))\r\n x_y_mapped = x_and_y.map(sep_x_y_words)\r\n output = x_y_mapped.shuffle(10000)\r\n return output", "def preprocess(args: argparse.Namespace) -> None:\n data_dir = os.path.join(args.data_dir, args.corpus)\n train_file = os.path.join(data_dir, 'train.jsonl')\n train_instances = load_jsonl(train_file, max_instances=args.max_instances)\n precompute_ngrams(train_instances)\n text1_gram1 = compute_most_freq_ngrams(train_instances, max_number=args.max_1gram,\n length=1, target=True)\n text1_gram2 = compute_most_freq_ngrams(train_instances, max_number=args.max_2gram,\n length=2, target=True)\n text1_gram3 = compute_most_freq_ngrams(train_instances, 
max_number=args.max_3gram,\n length=3, target=True)\n text2_gram1 = compute_most_freq_ngrams(train_instances, max_number=args.max_1gram,\n length=1, target=False)\n text2_gram2 = compute_most_freq_ngrams(train_instances, max_number=args.max_2gram,\n length=2, target=False)\n text2_gram3 = compute_most_freq_ngrams(train_instances, max_number=args.max_3gram,\n length=3, target=False)\n all_ngrams = list(set(text1_gram1 + text1_gram2 + text1_gram3 + text2_gram1 + text2_gram2 +\n text2_gram3))\n gram_to_dim_mapping = {ng: i for i, ng in enumerate(all_ngrams)}\n label_to_dim_mapping = map_labels_to_dims(train_instances)\n save_to_pickle(data=train_instances, fpath_out=os.path.join(\n args.serialization_dir, 'train_instances.pickle'))\n save_dict(data=gram_to_dim_mapping, fpath_out=os.path.join(args.serialization_dir,\n 'gram_mapping.json'))\n save_dict(data=label_to_dim_mapping, fpath_out=os.path.join(args.serialization_dir,\n 'label_mapping.json'))\n # save_dict(data=gram1, fpath_out=os.path.join(args.serialization_dir, '1grams.json'))\n # save_dict(data=gram2, fpath_out=os.path.join(args.serialization_dir, '2grams.json'))\n # save_dict(data=gram3, fpath_out=os.path.join(args.serialization_dir, '3grams.json'))", "def fit(self, text):\n\n with open('forward_ngram_model.pkl', 'rb') as fin:\n self.forward_model = pickle.load(fin)\n\n with open('backward_ngram_model.pkl', 'rb') as fin:\n self.backward_model= pickle.load(fin)", "def skipgram(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n dataset, word2vecCostAndGradient=softmaxCostAndGradient):\n \n cost = 0.0\n gradIn = np.zeros(inputVectors.shape)\n gradOut = np.zeros(outputVectors.shape)\n\n ### YOUR CODE HERE\n predicted = inputVectors[tokens[currentWord]]\n for cw in contextWords:\n per_cost, per_gradIn, per_gradOut = word2vecCostAndGradient(predicted, tokens[cw], outputVectors, dataset)\n cost += per_cost\n gradIn[tokens[currentWord]] += per_gradIn\n gradOut += per_gradOut\n '''\n predicted = inputVectors[tokens[currentWord]] # embedding vector\n one_hot_current = np.zeros(inputVectors.shape[0])\n one_hot_current[tokens[currentWord]] += 1\n for i in range(len(contextWords)):\n per_cost, gradPred, grad = word2vecCostAndGradient(predicted, tokens[contextWords[i]], outputVectors, dataset)\n cost += per_cost\n gradIn += np.outer(one_hot_current, gradPred)\n gradOut += grad\n '''\n ### END YOUR CODE\n\n return cost, gradIn, gradOut", "def train_and_generate(text_path):\n\n print(\"\\n------------------ ff.io Parameters ------------------\")\n print(f\"Generate text length: {text_length}\")\n print(f\"Sequence length: {seq_length}\\n\")\n print(f\"{layers_count} layers with dimension {layers_dim}\")\n print(f\"{epoch_num} epochs with batch size {batch_s}\\n\")\n\n text = read_text(text_path)\n\n if load_model:\n print(\"Loading model from file.\")\n\n if model_type == 'word':\n print(\"Creating word maps.\")\n characters, n_to_char, char_to_n = word_map(text)\n \n else: # Default to character maps\n print(\"Creating character maps.\")\n characters, n_to_char, char_to_n = character_map(text)\n\n if seed_text:\n seed_text_str = read_text(seed_text_filepath)\n\n print(\"Processing text.\")\n X, Y, characters, n_to_char = process_text(text, characters, n_to_char, char_to_n)\n\n print(\"Modelling\\n\")\n mod = model(X, Y, characters)\n\n gen_text = generate_text(mod, text_length, text, X, characters, n_to_char, char_to_n, seed_text_str = seed_text_str)\n\n return gen_text", "def generate_text(model, w2vmodel, nb_epoch, length=75, 
max_seq_length=20, seed=\"Rain drop drop top\"):\n global sample\n generated = ''\n sequences = seed\n\n generated += seed\n\n #clean seed\n seed=re.sub(r'<[^<]+?>', '', seed)\n #remove encoding characters like \\x86\n seed=re.sub(r'[^\\x00-\\x7f]','',seed)\n seed=re.sub(r'\\#','',seed)\n #remove punctuation\n seed=re.sub(r'[^A-Za-z0-9\\s]','',seed)\n\n #shorten if longer than max_seq_length\n seed = seed.split(' ')[:max_seq_length]\n\n word_ix_list = []\n for word in seed:\n try:\n word = word_to_ix(word,w2vmodel)\n except:\n #since we're using -1 as a null word (why we also pad with the not in vocab index), we'll use that for words that aren't in the word2vec model\n print('Warning: {0} not contained in training vocabulary. It will be ignored when computing output.'.format(word))\n word = word_to_ix('_UNSEEN_',w2vmodel)\n word_ix_list.append(word)\n\n #pad word_list with the unseen word2vec if shorter than max_seq_length\n word_ix_list = [word_to_ix('_UNSEEN_',w2vmodel)] * (max_seq_length-len(word_ix_list)) + word_ix_list\n\n for temp in [0.2, 0.5, .75, 1.0]:\n print('temperature: ', temp)\n for word in range(length):\n #reshape wordlist\n word_ix_list = np.asarray(word_ix_list).reshape(1,max_seq_length)\n\n #prediction = model.predict(x=word_ix_list)\n #next_ix = np.argmax(prediction)\n prediction = model.predict(x=word_ix_list,verbose=0)[0]\n next_ix = sample(prediction, temp)\n predicted_word = ix_to_word(next_ix,w2vmodel)\n\n generated += (' ' + predicted_word) #add predicted word to the generated output\n\n #remove first word from the word list to reduce the array for the max sequence length for the model\n word_ix_list = np.append(word_ix_list,next_ix)\n word_ix_list.shape\n word_ix_list = np.delete(word_ix_list,0,0)\n print(generated)\n print('-----')\n #print(generated)\n return", "def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n sentences.append([])\n labels.append([])\n \n return sentences,labels", "def skipgram(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n dataset, word2vecCostAndGradient=softmaxCostAndGradient):\n\n cost = 0.0\n gradIn = np.zeros(inputVectors.shape)\n gradOut = np.zeros(outputVectors.shape)\n\n ### YOUR CODE HERE\n currentWordIndex = tokens[currentWord]\n currentWordVector = inputVectors[currentWordIndex,:]\n costs = []\n currentWordGradIn = np.zeros(inputVectors.shape[1])\n #print currentWordIndex\n\n for word in contextWords:\n target = tokens[word]\n singleCost, singleGradIn, singleGradOut = word2vecCostAndGradient(currentWordVector,target,outputVectors,dataset)\n cost += singleCost\n currentWordGradIn += singleGradIn\n gradOut += singleGradOut\n\n gradIn[currentWordIndex,:] = currentWordGradIn\n ### END YOUR CODE\n\n return cost, gradIn, gradOut", "def get_unweighted_text_embeddings(\n text_encoder: CLIPTextModel,\n 
text_input: torch.Tensor,\n chunk_length: int,\n clip_skip: int,\n eos: int,\n pad: int,\n no_boseos_middle: Optional[bool] = True,\n):\n max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)\n if max_embeddings_multiples > 1:\n text_embeddings = []\n pool = None\n for i in range(max_embeddings_multiples):\n # extract the i-th chunk\n text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()\n\n # cover the head and the tail by the starting and the ending tokens\n text_input_chunk[:, 0] = text_input[0, 0]\n if pad == eos: # v1\n text_input_chunk[:, -1] = text_input[0, -1]\n else: # v2\n for j in range(len(text_input_chunk)):\n if text_input_chunk[j, -1] != eos and text_input_chunk[j, -1] != pad: # 最後に普通の文字がある\n text_input_chunk[j, -1] = eos\n if text_input_chunk[j, 1] == pad: # BOSだけであとはPAD\n text_input_chunk[j, 1] = eos\n\n # -2 is same for Text Encoder 1 and 2\n enc_out = text_encoder(text_input_chunk, output_hidden_states=True, return_dict=True)\n text_embedding = enc_out[\"hidden_states\"][-2]\n if pool is None:\n pool = enc_out.get(\"text_embeds\", None) # use 1st chunk, if provided\n if pool is not None:\n pool = train_util.pool_workaround(text_encoder, enc_out[\"last_hidden_state\"], text_input_chunk, eos)\n\n if no_boseos_middle:\n if i == 0:\n # discard the ending token\n text_embedding = text_embedding[:, :-1]\n elif i == max_embeddings_multiples - 1:\n # discard the starting token\n text_embedding = text_embedding[:, 1:]\n else:\n # discard both starting and ending tokens\n text_embedding = text_embedding[:, 1:-1]\n\n text_embeddings.append(text_embedding)\n text_embeddings = torch.concat(text_embeddings, axis=1)\n else:\n enc_out = text_encoder(text_input, output_hidden_states=True, return_dict=True)\n text_embeddings = enc_out[\"hidden_states\"][-2]\n pool = enc_out.get(\"text_embeds\", None) # text encoder 1 doesn't return this\n if pool is not None:\n pool = train_util.pool_workaround(text_encoder, enc_out[\"last_hidden_state\"], text_input, eos)\n return text_embeddings, pool", "def skipgram(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n dataset, word2vecCostAndGradient=softmaxCostAndGradient):\n\n cost = 0.0\n gradIn = np.zeros(inputVectors.shape)\n gradOut = np.zeros(outputVectors.shape)\n ### YOUR CODE HERE\n\n # Find the position of the context word and extract\n # the predicted vector that will be the input for skipgram function\n # (1) predicted: extract the predicted vector\n # (the location of the center word)\n # in the inputVector\n currentIndex = tokens[currentWord]\n predicted = inputVectors[currentIndex ]\n\n # iterate through each target context word and find the cost and gradient\n # the cost and gradient of prediction is the sum of cost and gradient\n # for all context words\n for word in contextWords:\n # target: find the location of each context words\n # from the token dictionary\n target = tokens[word]\n # Input all variables into the function selected\n # both function softMax.. and negSampling.. 
have the same the same inputs\n # outputVectors: use as is\n word_cost, word_gradPred, word_grad = word2vecCostAndGradient(\n predicted = predicted,\n target = target,\n outputVectors = outputVectors,\n dataset = dataset)\n\n # add each variable\n cost += word_cost\n # Gradient of v_c the gradient of predicted vector\n # Gradient of v_w for other indices = 0\n gradIn[currentIndex] += word_gradPred\n gradOut += word_grad\n\n ### END YOUR CODE\n return cost, gradIn, gradOut", "def _preprocess(self, txt_seq):\n input = []\n for token in txt_seq.split():\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n input.append(self.word2id[\"<END>\"])\n input = torch.LongTensor(input)\n return input", "def decode(args: Dict[str, str]):\n threshold = 2.0\n test_data = read_corpus(args['TEST_SOURCE_FILE'], source='src')\n\n print(f\"load model from {args['MODEL_PATH_I']}\", file=sys.stderr)\n model_I = NMT.load(args['MODEL_PATH_I'])\n model_I.encoder.dropout = nn.Dropout(0.)\n\n ces_I = []\n with torch.no_grad():\n for sent in tqdm(test_data, desc='Decoding', file=sys.stdout):\n loss = model_I([sent]).item()\n ce = loss / len(sent)\n ces_I.append(ce)\n\n print(f\"load model from {args['MODEL_PATH_N']}\", file=sys.stderr)\n model_N = NMT.load(args['MODEL_PATH_N'])\n model_N.encoder.dropout = nn.Dropout(0.)\n\n ces_N = []\n with torch.no_grad():\n for sent in tqdm(test_data, desc='Decoding', file=sys.stdout):\n loss = model_N([sent]).item()\n ce = loss / len(sent)\n ces_N.append(ce)\n\n ces_diff = []\n for ce_I, ce_N in zip(ces_I, ces_N):\n ces_diff.append(ce_I - ce_N)\n\n selected = 0\n with open(args['OUTPUT_FILE'], 'w') as f:\n for words, ce in zip(test_data, ces_diff):\n if (ce < threshold):\n selected += 1\n words = words[1:-1:1]\n sent = (\"\".join(words)).replace(\"▁\", \" ▁\").strip()\n # f.write(str(ce) + ' ')\n f.write(sent + '\\n')\n\n print(\"%d out of %d sentences selected.\" % (selected, len(test_data)))", "def raw_text_to_mmap(args):\n MMapTextDataset.tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, use_fast=True)\n assert len(MMapTextDataset.tokenizer) < 65535 # will use uint16 to store token ids\n all_files = glob.glob(f'{args.input_dir}/c4-*')\n print(len(all_files), MMapTextDataset.tokenizer)\n if os.path.exists(f'{args.output_dir}/cache/train.bin') and os.path.exists(f'{args.input_dir}/cache/val.bin'):\n logger.info(\"Cache already exists. 
Remove the cache directory to regenerate\")\n return\n try:\n os.mkdir(f'{args.output_dir}/cache/')\n except FileExistsError:\n pass\n try:\n os.mkdir(f'{args.output_dir}/shards-{args.shard_size}/')\n except FileExistsError:\n pass\n try:\n os.mkdir(f'{args.output_dir}/logs-{args.shard_size}/') # log progrss to be able to resume\n except FileExistsError:\n pass\n\n # STEP1: tokenizing and saving to shards\n if args.num_preprocessing_workers > 1:\n from multiprocessing.pool import Pool\n with Pool(args.num_preprocessing_workers) as p:\n list(tqdm(p.imap(MMapTextDataset._process_file, all_files), total=len(all_files)))\n else:\n [MMapTextDataset._process_file(f) for f in tqdm(all_files)]\n\n if args.data_type == 'raw_text': # c4 tfrecords are already sharded\n # STEP2: shuffling shards and combining them into train.bin and val.bin files\n all_shards = glob.glob(f'{args.output_dir}/shards-{args.shard_size}/*.bin')\n random.shuffle(all_shards) # shuffling based on shards not individual lines\n val_shards_count = int(args.train_dev_split * len(all_shards))\n val_shards = all_shards[:val_shards_count]\n train_shards = all_shards[val_shards_count:]\n # TODO: if MMapTextDataset._combining_shards is very slow for large files, it can be skipped but we nned to\n # update the dataset to read from multiple shards directly\n MMapTextDataset._combine_shards(f'{args.output_dir}/cache/val.bin', val_shards)\n MMapTextDataset._combine_shards(f'{args.output_dir}/cache/train.bin', train_shards)\n elif args.data_type == 'tfrecord':\n train_shards = glob.glob(f'{args.output_dir}/*train*.bin')\n val_shards = glob.glob(f'{args.output_dir}/*val*.bin')\n MMapTextDataset._combine_shards(f'{args.output_dir}/val.bin', val_shards)\n MMapTextDataset._combine_shards(f'{args.output_dir}/train.bin', train_shards)\n del MMapTextDataset.tokenizer", "def skipgram(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n dataset, word2vecCostAndGradient = softmaxCostAndGradient):\n\n # Implement the skip-gram model in this function.\n\n # Inputs:\n # - currrentWord: a string of the current center word\n # - C: integer, context size\n # - contextWords: list of no more than 2*C strings, the context words\n # - tokens: a dictionary that maps words to their indices in\n # the word vector list\n # - inputVectors: \"input\" word vectors (as rows) for all tokens\n # - outputVectors: \"output\" word vectors (as rows) for all tokens\n # - word2vecCostAndGradient: the cost and gradient function for\n # a prediction vector given the target word vectors,\n # could be one of the two cost functions you\n # implemented above\n\n # Outputs:\n # - cost: the cost function value for the skip-gram model\n # - grad: the gradient with respect to the word vectors\n # We will not provide starter code for this function, but feel\n # free to reference the code you previously wrote for this\n # assignment!\n\n ### YOUR CODE HERE\n (N, D) = inputVectors.shape\n\n target_index = tokens[currentWord]\n\n context_indices = np.zeros(len(contextWords), dtype = np.uint32)\n for i, w in enumerate(contextWords): context_indices[i] = tokens[w]\n\n v = inputVectors[context_indices, :]\n\n cost = 0.\n gradIn = np.zeros_like(inputVectors)\n gradOut = np.zeros_like(outputVectors)\n\n for j in context_indices:\n (c, gout, gin) = word2vecCostAndGradient(outputVectors[j], target_index, inputVectors, dataset)\n\n cost += c\n gradIn += gin\n gradOut[j, :] += gout\n\n ### END YOUR CODE\n\n return cost, gradIn, gradOut", "def generate_skip_gram_batch(data, 
batch_size, num_skips, skip_window, data_index=0):\n # global data_index # you can put data_index outside the function, then\n # modify the global data_index in the function without return it.\n # note: without using yield, this code use data_index to instead.\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n span = 2 * skip_window + 1 # [ skip_window target skip_window ]\n buffer = collections.deque(maxlen=span)\n for _ in range(span):\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n for i in range(batch_size // num_skips):\n target = skip_window # target label at the center of the buffer\n targets_to_avoid = [ skip_window ]\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n targets_to_avoid.append(target)\n batch[i * num_skips + j] = buffer[skip_window]\n labels[i * num_skips + j, 0] = buffer[target]\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n return batch, labels, data_index", "def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label", "def skipgram(current_center_word, outside_words, word2ind,\n center_word_vectors, outside_vectors, dataset,\n word2vec_loss_and_gradient=naive_softmax_loss_and_gradient):\n loss = 0.0\n grad_center_vecs = np.zeros(center_word_vectors.shape)\n grad_outside_vectors = np.zeros(outside_vectors.shape)\n\n ### YOUR CODE HERE\n center_word_idx = word2ind[current_center_word]\n center_word_vec = center_word_vectors[center_word_idx] \n \n for outside_word in outside_words:\n outside_word_idx = word2ind[outside_word]\n _loss_sk, _grad_center_vecs, _grad_outside_vectors = \\\n word2vec_loss_and_gradient(\n center_word_vec, outside_word_idx, outside_vectors, dataset)\n loss += _loss_sk\n grad_center_vecs[center_word_idx] += _grad_center_vecs\n grad_outside_vectors += _grad_outside_vectors \n ### END YOUR CODE\n\n return loss, grad_center_vecs, grad_outside_vectors", "def train_ngram_lm(kenlm_path, data_path, output_path, N):\n # create .arpa file of n-grams\n curdir = os.path.abspath(os.path.curdir)\n #\n command = \"bin/lmplz -o \"+str(N)+\" <\"+os.path.join(curdir, data_path) + \\\n \" >\"+os.path.join(curdir, output_path)\n os.system(\"cd \"+os.path.join(kenlm_path, 'build')+\" && \"+command)\n\n load_kenlm()\n # create language model\n model = kenlm.Model(output_path)\n\n return model", "def train_ngram_lm(kenlm_path, data_path, output_path, N):\n # create .arpa file of n-grams\n curdir = os.path.abspath(os.path.curdir)\n #\n command = \"bin/lmplz -o \"+str(N)+\" <\"+os.path.join(curdir, data_path) + \\\n \" >\"+os.path.join(curdir, output_path)\n os.system(\"cd \"+os.path.join(kenlm_path, 'build')+\" && \"+command)\n\n load_kenlm()\n # create language model\n model = kenlm.Model(output_path)\n\n return model", "def print_examples(example_iter, model, num=0, max_len=100,\n bos_index=1,\n src_eos_index = None,\n trg_eos_index = None,\n src_vocab=None, trg_vocab=None):\n 
model.eval()\n count=0\n\n BOS_TOKEN = \"<s>\"\n EOS_TOKEN = \"</s>\"\n UNK_TOKEN = \"<unk>\"\n\n if src_vocab is not None and trg_vocab is not None:\n src_bos_index = src_vocab.stoi[BOS_TOKEN]\n src_eos_index = src_vocab.stoi[EOS_TOKEN]\n trg_unk_index = trg_vocab.stoi[UNK_TOKEN]\n # trg_bos_index = trg_vocab.stoi[BOS_TOKEN]\n # trg_eos_index = trg_vocab.stoi[EOS_TOKEN]\n else:\n src_bos_index = 0\n src_eos_index = 1\n trg_unk_index = 2\n # trg_bos_index = 1\n # trg_eos_index = None\n\n for i, batch in enumerate(example_iter, 1):\n src = batch.src.cpu().numpy()[0, :]\n trg_idx = batch.trg_idx.cpu().numpy()[0, :]\n\n # remove </s>\n src = src[1:] if src[0]==src_bos_index else src\n src = src[:-1] if src[-1]==src_eos_index else src\n # trg = trg[:-1] if trg[-1]==trg_eos_index else trg\n\n result = greedy_decode(model, batch.src_idx, batch.src_mask, batch.src_lengths)\n print()\n print(\"Example %d\" % i)\n print(\"Source: \", \" \".join(lookup_words(src, vocab=src_vocab)))\n print()\n print(\"Target: \", set(lookup_words(trg_idx, vocab=trg_vocab)))\n print()\n print(\"Prediction: \", \" \".join(lookup_words(result[0], vocab=trg_vocab)))\n\n count += 1\n if count == num:\n break", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def main():\n logging.basicConfig(level=logging.WARN)\n\n text = extract()\n text, char_indices, indices_char, x, y = transform(text)\n model(text, char_indices, indices_char, x, y)\n\n pass", "def fasttext_wordvectors(corpus_path, model_path):\n model = fasttext.train_unsupervised(corpus_path)\n model.save_model(model_path)\n return model", "def main():\n if args.file and not args.nomodel:\n text = read_file(args.file)\n trained_model = train_char_model(text, args.prev)\n save_model(trained_model, args.file)\n sys.exit()\n if args.model:\n trained_model = load_model(args.model)\n if args.nomodel and args.file:\n trained_model = train_char_model(read_file(args.file), args.prev)\n # generate some random text\n history = check_history(trained_model, args.prev)\n gentext = generate_text(trained_model, history, args.gen)\n print(gentext)", "def skipgram(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n\tdataset, word2vecCostAndGradient=softmaxCostAndGradient):\n\n\tcost = 0.0 \n\tgradIn = np.zeros(inputVectors.shape)\n\tgradOut = np.zeros(outputVectors.shape)\n\n\tcword_idx = tokens[currentWord]\n\tvhat = inputVectors[cword_idx]\n\n\tfor j in contextWords:\n\t\tu_idx = tokens[j]\n\t\tc_cost, c_grad_in, c_grad_out = \\\n\t\t\tword2vecCostAndGradient(vhat, u_idx, outputVectors, dataset)\n\t\tcost += c_cost \n\t\tgradIn[cword_idx] += c_grad_in\n\t\tgradOut += c_grad_out \n\n\treturn cost, gradIn, gradOut", "def skipgram_model(vocabulary_size, embedding_size, batch_size, num_sampled, valid_examples,\n learning_rate):\n graph = tf.Graph()\n with graph.as_default():\n # Input data.\n tf_train_dataset = tf.placeholder(tf.int32, shape=[batch_size])\n tf_train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n\n # Variables.\n embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n # This is actually transposed compared to usual layer weights. 
The std is\n # deduced accordingly, from the input size (embedding_size).\n softmax_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / np.sqrt(embedding_size)))\n softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n # Model.\n # Look up embeddings for inputs.\n embed = tf.nn.embedding_lookup(embeddings, tf_train_dataset)\n # Compute the softmax loss, using a sample of the negative labels each time.\n loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(\n softmax_weights, softmax_biases, embed, tf_train_labels, num_sampled, vocabulary_size))\n\n # Optimizer.\n optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss)\n\n # Compute the similarity between minibatch examples and all embeddings.\n # We use the cosine distance:\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n\n similarity = None\n if valid_examples is not None:\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)\n similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))\n\n tf_graph = {\n 'graph': graph,\n 'data_ph': tf_train_dataset,\n 'labels_ph': tf_train_labels }\n\n return tf_graph, optimizer, loss, normalized_embeddings, similarity", "def skipgram(_input: List[str], N: int, skip: Optional[int] = None,\n delim: str = ' ', skip_delim: str = '_') -> List[str]:\n max_len = len(_input)\n # Delimiter must be a string\n # assert isinstance(delim, basestring)\n # assert isinstance(skip_delim, basestring)\n\n ngram_tokens = []\n\n for start in range(max_len):\n for n in range(1, min(N+1, max_len+1)):\n end = start + n\n if end > len(_input):\n break\n ngram_tokens.append(delim.join(_input[start:end]))\n\n if skip:\n\n for s in range(1, skip+1):\n\n skipped = skip_delim.join(_input[start:end:s])\n # if skipped not in ngram_tokens:\n ngram_tokens.append(skipped)\n\n return ngram_tokens", "def make_simple_skipgram_loss(clip=None):\n def loss(edge_logits, num_vertex, edge_list, edge_weights, params):\n with tf.name_scope('skipgram_loss', values=[edge_logits, edge_list, edge_weights]):\n if len(edge_list.shape) == 3:\n batch_size = tf.to_float(tf.shape(edge_list)[0])\n else:\n batch_size = 1.\n\n edge_present = tf.to_float(tf.equal(edge_weights, 1))\n\n # values of -1 in the weights indicate padded edges which should be ignored\n # in loss computation.\n edge_censored = tf.to_float(tf.not_equal(edge_weights, -1))\n\n edge_pred_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=edge_present, logits=edge_logits)\n\n edge_pred_loss = edge_pred_loss * edge_censored\n\n if clip:\n edge_pred_loss = tf.clip_by_value(edge_pred_loss, 0, clip)\n\n # sum instead of (tf default of) mean because mean screws up learning rates for embeddings\n loss_value = tf.divide(tf.reduce_sum(edge_pred_loss), batch_size,\n name='skipgram_edge_loss')\n return loss_value\n\n return loss", "def skipgram(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n dataset, word2vecCostAndGradient=softmaxCostAndGradient):\n\n cost = 0.0\n gradIn = np.zeros(inputVectors.shape)\n gradOut = np.zeros(outputVectors.shape)\n\n idx = tokens[currentWord] # tokens['a'] = 1\n input_vector = inputVectors[idx:idx+1]\n\n for context in contextWords:\n c, g_in, g_out = word2vecCostAndGradient(input_vector, tokens[currentWord], outputVectors, dataset)\n cost += c\n gradIn[idx:idx+1, :] += g_in\n gradOut += g_out\n\n return cost, 
gradIn, gradOut", "def train(args: Dict):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print('use device: %s' % device)\n\n train_data_src = read_corpus(args['--train-src'], source='src')\n train_data_tgt = read_corpus(args['--train-tgt'], source='tgt')\n\n dev_data_src = read_corpus(args['--dev-src'], source='src')\n dev_data_tgt = read_corpus(args['--dev-tgt'], source='tgt')\n\n train_data = list(zip(train_data_src, train_data_tgt))\n dev_data = list(zip(dev_data_src, dev_data_tgt))\n\n train_batch_size = int(args['--batch-size'])\n N = int(args['--N'])\n d_model = int(args['--d_model'])\n d_ff = int(args['--d_ff'])\n h = int(args['--h'])\n dropout = float(args['--dropout'])\n\n valid_niter = int(args['--valid-niter'])\n log_every = int(args['--log-every'])\n model_save_path = args['--save-to']\n lr=float(args['--lr'])\n\n vocab = Vocab.load(args['--vocab'])\n vocab_mask = torch.ones(len(vocab.tgt))\n vocab_mask[vocab.tgt['<pad>']] = 0\n\n model = make_model(len(vocab.src), len(vocab.tgt), N, d_model, d_ff, h, dropout)\n model = model.to(device)\n\n optimizer = NoamOpt(model.src_embed[0].d_model, 1, 400,\n torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.98), eps=1e-9))\n\n num_trial = 0\n train_iter = patience = cum_loss = report_loss = cum_tgt_words = report_tgt_words = 0\n cum_exmaples = report_examples = epoch = valid_num = 0\n hist_valid_scores = []\n train_time = begin_time = time.time()\n print('begin Maximum Likelihood Training')\n\n while True:\n epoch += 1\n for src_sents, tgt_sents in batch_iter(train_data, batch_size=train_batch_size, shuffle=True):\n train_iter += 1\n optimizer.zero_grad()\n batch_size = len(src_sents)\n\n example_losses = - model(src_sents, tgt_sents) #(batch_size,)\n batch_loss = example_losses.sum()", "def generate_batch(batch_size, num_skips, skip_window):\n # global keyword gives this function access to global variable data_index\n global data_index\n assert batch_size % num_skips == 0 \n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n span = 2 * skip_window + 1\n # Create a double-ended queue (both stack and queue) for word buffer\n # maxlen - keeping a fixed sliding window \n buffer = collections.deque(maxlen=span)\n for _ in range(span):\n # Shift the skipgram window to the left by 1\n buffer.append(data[data_index])\n # Increase data_index for next shift\n data_index = (data_index + 1) % len(data)\n for i in range(batch_size // num_skips):\n # target label at the center of the buffer \n target = skip_window \n # avoid the target word and later selected words\n targets_to_avoid = [ skip_window ]\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n targets_to_avoid.append(target)\n # batch is the same word for current num_skip\n batch[i * num_skips + j] = buffer[skip_window]\n labels[i * num_skips + j, 0] = buffer[target]\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n return batch, labels", "def nangdok(data_dir, batch_size, test_max_size, **kwargs):\n join = lambda f: _path.join(data_dir, f)\n texts = []\n with open(join(\"script_nmbd_by_sentence.txt\"), encoding=\"utf-16-le\") as f:\n tmp = []\n for line in f.readlines():\n if line.startswith(\"<\"):\n texts.append(tmp)\n tmp = []\n elif _re.match(r\"^\\d+\\..*\", line):\n tmp.append(line)\n texts.append(tmp)\n del texts[0]\n participants = sorted(filter(lambda l: 
_re.match(\"^[fm][v-z][0-9]+\", l),\n _os.listdir(data_dir)))\n test_sentences = kwargs.get(\"test_sentences\",\n [_random.choice(ts) for ts in texts])\n test_participants = kwargs.get(\"test_participants\",\n [_random.choice(list(g))\n for _, g in _groupby(participants, lambda p: p[:2])])\n train = []\n test = []\n for participant in sorted(participants):\n for i, _ in enumerate(texts):\n for j, text in enumerate(_):\n f = join(\"{0}/{0}_t{1:0>2}_s{2:0>2}.wav\".format(participant, i+1, j+1))\n if _path.isfile(f):\n if text in test_sentences or participants in test_participants:\n test.append((f, text))\n else:\n train.append((f, text))\n _random.shuffle(test)\n valid = test[:batch_size]\n if test_max_size and batch_size + test_max_size < len(test):\n test = test[batch_size:(batch_size + test_max_size)]\n else:\n test = test[batch_size:]\n return train, valid, test", "def generate_markov_text(self, file, size=15, sent=7000):\n\t\tseed = random.randint(0, self.word_size-3)\n\t\tend = \"</f>\"\n\t\t# print(seed)\n\t\tseed_word, next_word = self.words[seed], self.words[seed+1]\n\t\tw1, w2 = seed_word, next_word\n\t\trestart = 0\n\n\t\twith open (file, 'a') as output: # 'append' instead of 'w'\n\t\t\tindex = 18001 # the previous 18k sentences are already written down in file \n\t\t\tfor i in range(sent):\n\t\t\t\tgen_words = [] # record one sentence\t\n\t\t\t\tfor j in range(1, size):\n\t\t\t\t\tgen_words.append(w1)\n\t\t\t\t\t# when comes to the end of words, restart with a new random seed number for w1 and w2\n\t\t\t\t\tif w2 == end:\n\t\t\t\t\t\trestart += 1 # record the restarting number\n\t\t\t\t\t\tseed = random.randint(0, self.word_size-3)\n\t\t\t\t\t\tw1, w2 = self.words[seed], self.words[seed+1]\n\t\t\t\t\tw1, w2 = w2, random.choice(self.cache[(w1, w2)])\n\t\t\t\tgen_words.append(w2)\n\t\t\t\t# print(str(i+1) + '. 
' + ' '.join(gen_words))\n\t\t\t\tsentence = ' '.join(gen_words)\n\t\t\t\toutput.write(str(index)+'\\t0000000\\t'+str(sentence)+'\\tnegatif\\n')\n\t\t\t\tindex += 1\n\t\toutput.close()\n\t\t# print(restart)", "def wtrie_data(lines, suffix, pre_train):\n if pre_train:\n file_path = str(save_path) + '/news_{}'.format(str(suffix))\n if file_path.split('/')[-1] in os.listdir(save_path):\n _error('{} exists'.format(file_path))\n raise FileExistsError\n\n _info('Save {} \\n'.format(file_path))\n with codecs.open(file_path, 'w', 'utf-8') as file:\n if pre_train:\n for line in lines:\n # if TPU available, no need to cut the sentences with long length,\n # However, Do you think we could use TPU for training ?\n if len(line) <= 50:\n line = list(map(_to_str, line))\n file.write(' '.join(line) + '\\n')\n file.flush()\n else:\n if type(lines) is not zip:\n _error('for fine tune, the data type should be zip', head='TYPE ERROR')\n raise TypeError\n file_path = 'data/chat_idx.txt'\n with codecs.open(file_path, 'w', 'utf-8') as file:\n for que, ans in lines:\n que = list(map(_to_str, que)) # IMPORTANT\n ans = list(map(_to_str, ans))\n if (len(que) != 0) and (len(ans) != 0):\n line = ' '.join(que) + '=' + ' '.join(ans)\n file.write(line + '\\n')\n file.flush()\n else:\n continue", "def main():\n data_path = \"ner_data/\"\n vector_file = \"ner_vectors.txt\"\n train_path = os.path.join(data_path, \"train_data.txt\")\n dev_path = os.path.join(data_path, \"dev_data.txt\")\n test_path = os.path.join(data_path, \"test_data.txt\")\n vector_path = os.path.join(data_path, vector_file)\n\n # NER中暂时不用\n # bigram_path = os.path.join(data_path, \"words_for_training\")\n # dict_path = os.path.join(data_path, \"PinyinDict.txt\")\n\n char_to_id, tag_to_id, char_vectors = _ner_build_vocab(vector_path, train_path)\n # pinyin_dict = _read_pinyin_dict(dict_path)\n # Save char_dict and tag_dict\n _save_vocab(char_to_id, os.path.join(data_path, \"char_to_id\"))\n _save_vocab(tag_to_id, os.path.join(data_path, \"tag_to_id\"))\n print(\"char dictionary size \" + str(len(char_to_id)))\n print(\"tag dictionary size \" + str(len(tag_to_id)))\n\n # train_char, train_tag, train_dict, train_len = _file_to_char_ids(train_path, char_to_id, tag_to_id, pinyin_dict)\n # train_char, train_tag, train_len, train_seg = _ner_file_to_char_ids(train_path, char_to_id, tag_to_id)\n # print(\"train dataset: \" + str(len(train_char)) + \" \" + str(len(train_tag)))\n\n # dev_char, dev_tag, dev_dict, dev_len = _file_to_char_ids(dev_path, char_to_id, tag_to_id, pinyin_dict)\n dev_char, dev_tag, dev_len, dev_seg = _ner_file_to_char_ids(dev_path, char_to_id, tag_to_id)\n print(\"dev dataset: \" + str(len(dev_char)) + \" \" + str(len(dev_tag)))\n\n # test_char, test_tag, test_dict, test_len = _file_to_char_ids(test_path, char_to_id, tag_to_id, pinyin_dict)\n # test_char, test_tag, test_len, test_seg = _ner_file_to_char_ids(test_path, char_to_id, tag_to_id)\n # print(\"test dataset: \" + str(len(test_char)) + \" \" + str(len(test_tag)))\n # vocab_size = len(char_to_id)\n # sums = 0\n # for l in dev_len:\n # sums = sums+l+1\n # print(sums)\n print(sum(dev_len))\n xArray, yArray, lArray, segArray = ner_iterator(dev_char, dev_tag, dev_len, 1, dev_seg)\n char_sums = 0\n for l in lArray:\n char_sums = char_sums+sum(l)\n print(char_sums)", "def run():\n # all data and labels\n # tracemalloc.start()\n # start = time.time()\n data, labels = Startdata.getdata() # texts\n data2, labels2 = Startdata.getdata2() # emails\n # Startdata.bagofwords(data2, labels2)\n data, 
labels = Startdata.combinedata(data, data2, labels, labels2)\n # split into training and testing. 1/3 test, 2/3 train\n traind, trainl, testd, testl = Startdata.splitdata(data, labels)\n\n # labels\n trainlabels = Startdata.labelfix(trainl)\n testlabels = Startdata.labelfix(testl)\n\n # selective features\n #\n # extract features for use. in the shape of NxD\n # N is number of samples, D is number of features\n # current, peak = tracemalloc.get_traced_memory()\n trainfeat = Startdata.featurextract(traind, trainl)\n testfeat = Startdata.featurextract(testd, testl)\n # theta is the weights in a D+1 X 1 array\n theta = Spamfilter.train(trainfeat, trainlabels)\n #\n # trying bag of words\n #\n\n # Startdata.featurextract(data, labels)\n # error rate was 1.69% for trainingdata\n # 2.21% for testing data\n # bag, tfeat = Startdata.bagofwords(traind)\n # theta = Spamfilter.train(tfeat, trainlabels)\n # testfeat = Startdata.features(testd, bag)\n\n test(theta, testfeat, testlabels)\n # tracemalloc.stop()\n # done = time.time()\n # print(f\"Current memory usage is {current / 10**6} MB; Peak was {peak / 10**6} MB\")\n # print(\"time to complete\", done - start)\n # NTR 12/1/2020 current best featextraction at 25 iterations is about\n # 0.7-1% error for\n # trainingdata and testing data\n # NTR 12/2/2020 bag of words at 25 iterations\n # 1.69% training error, 2.21% testing error\n # NTR 12/2/2020 bag of words, 25 iter, removal of some features\n # NTR 12/3/2020 featextraction 20 iterations, new features, emails inc\n # 0.59% error on training. 0.63% testing error", "def Preprocess_IMDB(path=\"datasets/raw/aclImdb/\"):\n output_path = \"datasets/preprocessed/IMDB_Data\"\n\n neg = glob.glob(os.path.join(path, 'test', 'neg', '*'))\n neg += glob.glob(os.path.join(path, 'train', 'neg', '*'))\n neg_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in neg]\n neg_data = [sentence[0] for sentence in neg_data]\n\n\n pos = glob.glob(os.path.join(path, 'test', 'pos', '*'))\n pos += glob.glob(os.path.join(path, 'train', 'pos', '*'))\n pos_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in pos]\n pos_data = [sentence[0] for sentence in pos_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def main():\n\n tok = T5Tokenizer.from_pretrained('t5-small')\n data = Data(\n xmi_dir=args.xmi_dir,\n tokenizer=tok,\n 
max_input_length=args.max_input_length,\n max_output_length=args.max_output_length,\n partition=args.partition,\n n_files=args.n_files)\n\n for index in range(len(data)):\n input_ids = data[index]['input_ids']\n output_ids = data[index]['labels']\n print(tok.decode(input_ids, skip_special_tokens=True))\n print(tok.decode(output_ids, skip_special_tokens=True))\n print()", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--filepath\", default=None, type=str, required=True, help=\"Path to dataset\")\n parser.add_argument(\"--truncate\", action='store_true', help=\"Truncate the data when enabled\")\n parser.add_argument(\"--stats\", action='store_true', help=\"Get stats for the file\")\n parser.add_argument(\"--count_vocab\", action='store_true', help=\"Get vocabulary count and save vocabulary for the file\")\n ##generation\n parser.add_argument('--generate', action='store_true', help=\"Start the generation\")\n parser.add_argument(\"--temperature\", type=float, default=1.0, help=\"Softmax temperature setting\")\n parser.add_argument(\"--length\", type=int, default=150, help=\"number of words to be generated\")\n parser.add_argument(\"--top_k\", type=int, default=1, help=\"parameter for Top-k sampling\")\n parser.add_argument('--stop_token', type=str, default=None, help=\"Token at which text generation is stopped\")\n parser.add_argument('--num_samples', type=int, default=500, help=\"Number of samples to be generated and compared with\")\n parser.add_argument('--save_dir', default=\"../save/\", type=str, help=\"Path to save the system outputs\")\n parser.add_argument(\"--no_cuda\", action='store_true', help=\"Avoid using CUDA when available\")\n ##evaluation\n parser.add_argument(\"--evaluate\", action='store_true', help=\"Start the evaluation\")\n parser.add_argument(\"--eval_dir\", default='../save/gpt2/', help=\"The path to evaluate the system outputs\")\n parser.add_argument(\"--eval_model\", default='gpt2', help=\"The model name to evaluate the system outputs\")\n parser.add_argument(\"--reading_scores\", action='store_true', help=\"Get the average reading scores\") #OK\n parser.add_argument(\"--content_words\", action='store_true', help=\"Get the normalized mean of content words and stop words\") #OK\n parser.add_argument(\"--ngram_overlap\", action='store_true', help=\"Get the average N gram overlap percentage with the prompt\") #OK\n parser.add_argument(\"--sw\", action='store_true', help=\"Do stopword elimination\")\n parser.add_argument(\"--stem\", action='store_true', help=\"Do stemming\")\n parser.add_argument(\"--parse_scores\", action='store_true', help=\"Get the average, skewness and kurtosis of the parses of stories\") \n parser.add_argument(\"--sentemb_sim_scores\", action='store_true', help=\"Get the sentence embedding similarity percentage with the prompt\")\n parser.add_argument(\"--sent_length\", action='store_true', help=\"Get the average sentence length\")\n parser.add_argument(\"--pos_tag_fqd\", action='store_true', help=\"Get POS tag frequency distribution as percentages\")\n parser.add_argument(\"--log_unigm_prob\", action='store_true', help=\"Get the average log unigram probability\")\n # parser.add_argument(\"--coherence_scores\", action='store_true', help=\"Get the average coherence scores\") \n args = parser.parse_args()\n\n\n filepath = args.filepath\n truncate_bool = args.truncate\n stats_bool = args.stats \n vocab_bool = args.count_vocab\n #generation\n generate_bool = args.generate\n temperature = args.temperature\n length = 
args.length\n top_k = args.top_k\n stop_token = args.stop_token\n num_samples = args.num_samples\n save_dir = args.save_dir\n no_cuda_bool = args.no_cuda\n #evaluation\n evaluate_bool = args.evaluate\n eval_direcpath = args.eval_dir #path to the model folder\n eval_modelname = args.eval_model #name of the model evaluating\n eval_RS = args.reading_scores #evaluate reading scores\n eval_CW = args.content_words #evaluate the percentage of content and stop words\n eval_NG = args.ngram_overlap #evaluate story prompt relatedness scores with ngram overlap pc\n eval_PS = args.parse_scores #evaluate the grammaticality\n eval_SE = args.sentemb_sim_scores #evaluate story prompt relatedness scores\n eval_SL = args.sent_length #evaluate the syntactic complexity\n eval_PF = args.pos_tag_fqd #evaluate the pos-tag frequency distribution as percentages\n eval_RW = args.log_unigm_prob #evaluate the rareword usage scores as mean log unigram probability\n sw = False\n if args.sw:\n sw = True\n stem = False\n if args.stem:\n stem = True\n\n f_prep = FilePreprocessor(filepath) \n if truncate_bool: #required when you are running the code the first time\n f_prep.truncate_stories(num_words=1000)\n if stats_bool:\n num_stories, num_prompts = f_prep.check_num_stories()\n print (num_prompts, num_stories) \n if vocab_bool:\n vocab_counter_prompt, vocab_counter_story = f_prep.make_vocabulary()\n print (\"The vocabulary for the stories: {}\".format(vocab_counter_story))\n print (\"The vocabulary for the prompts: {}\".format(vocab_counter_prompt))\n ##### get the prompt from the file -- done\n ##### get the model type and model file name and path as a dictionary -- done\n ##### for each model type save the prompt, the original story and the generated story with \"temp val\" and \"top k\" val and \"model name\" and \"index of random story prompt selected\" in a file: \"gentext_\"+model_+\"_\"+temperature+\"_\"+top_k+\"_\"+i -- done\n ##### finish the 4 openai gptx models and then move onto xlnet models --done\n if generate_bool:\n # define the pre-trained models offered by huggingface/transformers github: https://github.com/huggingface/transformers for generation\n # Model classes at https://github.com/huggingface/transformers/blob/master/examples/run_generation.py \n if not os.path.exists(save_dir): os.mkdir(save_dir)\n # PT_model_dict = {\"openai-gpt\": [\"openai-gpt\"], \"gpt2\": [\"gpt2\", \"gpt2-medium\", \"gpt2-large\", \"distilgpt2\"], \"xlnet\": [\"xlnet-base-cased\", \"xlnet-large-cased\"], \"transfo-xl\": [\"transfo-xl-wt103\"], \"xlm\": [\"xlm-mlm-en-2048\", \"xlm-mlm-ende-1024\", \"xlm-mlm-enfr-1024\", \"xlm-mlm-enro-1024\", \"xlm-mlm-tlm-xnli15-1024\", \"xlm-mlm-xnli15-1024\", \"xlm-clm-enfr-1024\", \"xlm-clm-ende-1024\", \"xlm-mlm-17-1280\", \"xlm-mlm-100-1280\"]}\n PT_model_dict = {\"openai-gpt\": [\"openai-gpt\"], \"gpt2\": [\"gpt2\", \"gpt2-medium\", \"gpt2-large\"], \"xlnet\": [\"xlnet-base-cased\", \"xlnet-large-cased\"], \"transfo-xl\": [\"transfo-xl-wt103\"]}\n # #check values for variables exist\n # assert temperature\n # assert length\n # assert top_k\n print (\"Get the prompts from {} samples in the test set...\".format(num_samples))\n story_files_dict = f_prep.get_art_prp_file()\n story_files_test = story_files_dict['test']\n nums_selected = random.sample(range(len(story_files_test)), num_samples)\n for idx, i in enumerate(nums_selected):\n prompt = (story_files_test[i][0]).replace(\"[ wp ]\", \"\") #remove the tag from the prompt and save it\n story = story_files_test[i][1]\n # print (\"Prompt: 
{}\".format(prompt))\n # print (\"Original Story: {}\".format(story))\n for k,v in PT_model_dict.items():\n model_type = k\n model_names_list = v\n for model_ in model_names_list:\n print (\"Generating story #{} with model {} ...\".format(idx+1, model_))\n print (\"Selected story prompt: {}\".format(i+1))\n start_time = time.time()\n generated_text = text_generator(model_type=model_type, model_name_or_path=model_, prompt=prompt, padding_text=story[:50], xlm_lang=\"\", length=length, temperature=temperature, top_k=top_k, top_p=0.9, no_cuda=no_cuda_bool, seed=42, stop_token=stop_token, verbose=False)\n time_elapsed = time.time() - start_time\n temp_pc = int(temperature*100)\n filename_ = \"gentext_\"+model_+\"_T\"+str(temp_pc)+\"_k\"+str(top_k)+\"_\"+str(i)+\".txt\"\n with open(os.path.join(save_dir, filename_),'w') as w_f:\n w_f.write(\"Prompt: \" + prompt + \"\\n\")\n w_f.write(\"Original: \" + story + \"\\n\")\n w_f.write(\"Generated: \" + generated_text + \"\\n\")\n w_f.write(\"Time elapsed: \" + str(time_elapsed) + \"\\n\")\n ##### get the directory of the samples by each model --done\n ##### read the files and get the dataframe from each model \n if evaluate_bool:\n print (\"Evaluation for {} model: \".format(eval_modelname))\n eval_modelObj = EvalDQ(eval_direcpath)\n print (\"Reading the samples ...\") \n \n if eval_modelname == \"fusion\":\n df_modelObj = eval_modelObj.read_fusion_output()\n else:\n df_modelObj = eval_modelObj.read_data_strings()\n # print (df_modelObj[\"temp\"].tolist())\n # exit()\n \n temp = set(df_modelObj[\"temp\"].tolist())\n topK = set(df_modelObj[\"topK\"].tolist())\n print (\"The shape of the Dataframe object for model {} is {}:\".format(eval_modelname, df_modelObj.shape))\n print (\"The temperature and k values are: {} and {}:\".format(temp, topK))\n \n if eval_RS:\n print (\"Calculating the Readability scores ... \")\n print (\"For the original stories ...\")\n df_modelObj_RS_original = eval_modelObj.get_readability_scores(df_modelObj,\"original\")\n print (\"The mean reading score values for the original files ...\")\n print (df_modelObj_RS_original.mean(axis=0))\n print (\"For the generated stories ...\")\n df_modelObj_RS_generated = eval_modelObj.get_readability_scores(df_modelObj,\"generated\")\n print (\"The mean reading score values for the generated files ...\")\n print (df_modelObj_RS_generated.mean(axis=0))\n \n if eval_CW:\n print (\"Calculating the percentage of content words VS stop words ...\")\n print (\"For the original stories ...\")\n cw_ct_ori, sw_ct_ori = eval_modelObj.count_contentwords(df_modelObj, \"original\")\n mean_cw_ct_ori = statistics.mean(cw_ct_ori) #look at the normalized mean \n mean_sw_ct_ori = statistics.mean(sw_ct_ori)\n print (\"The normalized mean for content words is {} and for stop words is {}\".format(mean_cw_ct_ori, mean_sw_ct_ori))\n print (\"For the generated stories ...\")\n cw_ct_gen, sw_ct_gen = eval_modelObj.count_contentwords(df_modelObj, \"generated\")\n mean_cw_ct_gen = statistics.mean(cw_ct_gen) #look at the normalized mean \n mean_sw_ct_gen = statistics.mean(sw_ct_gen)\n print (\"The normalized mean for content words is {} and for stop words is {}\".format(mean_cw_ct_gen, mean_sw_ct_gen))\n\n if eval_NG:\n print (\"Calculating the Story Prompt Relatedness scores ... 
\")\n print (\"Calculating the average n-gram overlap with the prompt...\")\n # avg_ngmoverlap_pc_gen = eval_modelObj.ngram_overlap(df_modelObj, (\"generated\", \"prompt\"), n=3)\n # print (\"The average overlap percentage is {}\".format(avg_ngmoverlap_pc_gen))\n print (\"For the original stories ...\")\n for i in [1,2,3]:\n print (\"Getting the average for n={}\".format(i))\n avg_ngmoverlap_pc_ori = eval_modelObj.ngram_overlap(df_modelObj, (\"original\", \"prompt\"), n=i, sw=sw, stem=stem)\n print (\"The average overlap percentage is {}\".format(avg_ngmoverlap_pc_ori))\n print (\"For the generated stories ...\")\n for i in [1,2,3]:\n print (\"Getting the average for n={}\".format(i))\n avg_ngmoverlap_pc_gen = eval_modelObj.ngram_overlap(df_modelObj, (\"generated\", \"prompt\"), n=i, sw=sw, stem=stem)\n print (\"The average overlap percentage is {}\".format(avg_ngmoverlap_pc_gen))\n\n if eval_PS:\n print (\"Calculating the constituency parsing scores ...\")\n print (\"For the original stories ...\")\n _, skew_scores_ori, kurt_scores_ori = eval_modelObj.parsing_score_calculation(df_modelObj, \"original\")\n mean_skew_scores_ori = statistics.mean(skew_scores_ori) #look at the normalized mean \n mean_kurt_scores_ori = statistics.mean(kurt_scores_ori)\n print (\"The mean skewness is {} and kurtosis is {}\".format(mean_skew_scores_ori, mean_kurt_scores_ori))\n print (\"For the generated stories ...\")\n _, skew_scores_gen, kurt_scores_gen = eval_modelObj.parsing_score_calculation(df_modelObj, \"generated\")\n mean_skew_scores_gen = statistics.mean(skew_scores_gen) #look at the normalized mean \n mean_kurt_scores_gen = statistics.mean(kurt_scores_gen)\n print (\"The mean skewness is {} and kurtosis is {}\".format(mean_skew_scores_gen, mean_kurt_scores_gen))\n \n if eval_SE:\n print (\"Calculating the Story Prompt Relatedness scores ... 
\")\n print (\"Calculating the sentence embedding similarity with the prompt...\")\n print (\"For the original stories ...\")\n avg_sentemb_sim_ori = eval_modelObj.word2vec_sentsim(df_modelObj, (\"original\", \"prompt\"))\n print (\"The average sentence embedding similarity is {}\".format(avg_sentemb_sim_ori))\n print (\"For the generated stories ...\")\n avg_sentemb_sim_gen = eval_modelObj.word2vec_sentsim(df_modelObj, (\"generated\", \"prompt\"))\n print (\"The average sentence embedding similarity is {}\".format(avg_sentemb_sim_gen))\n\n if eval_SL:\n print (\"Calculating the average sentence length ...\")\n print (\"For the orginal stories ...\")\n sentlen_list_ori = eval_modelObj.average_sentence_length(df_modelObj, \"original\")\n mean_sentlen_ori = statistics.mean(sentlen_list_ori)\n print (\"The average sentence length is {}\".format(mean_sentlen_ori))\n print (\"For the generated stories ...\")\n sentlen_list_gen = eval_modelObj.average_sentence_length(df_modelObj, \"generated\")\n mean_sentlen_gen = statistics.mean(sentlen_list_gen)\n print (\"The average sentence length is {}\".format(mean_sentlen_gen))\n \n if eval_PF:\n print (\"Calculating the POS tag frequency tag distribution ...\")\n print (\"For the original stories ...\")\n df_modelObj_POS_ori = eval_modelObj.pos_tag_freqdist(df_modelObj, \"original\")\n print (\"The mean POS tag percentages for the original files ...\")\n POS_dict_ori = (df_modelObj_POS_ori.mean(axis=0)).to_dict()\n print (\"NOUN: {} and VERB: {}\".format(POS_dict_ori['NOUN']*100, POS_dict_ori['VERB']*100))\n print (\"For the generated stories ...\")\n df_modelObj_POS_gen = eval_modelObj.pos_tag_freqdist(df_modelObj, \"generated\")\n print (\"The mean POS tag percentages for the generated files ...\")\n POS_dict_gen = df_modelObj_POS_gen.mean(axis=0)\n print (\"NOUN: {} and VERB: {}\".format(POS_dict_gen['NOUN']*100, POS_dict_gen['VERB']*100))\n\n if eval_RW:\n print (\"Calculating the rare word usage metrics ...\")\n print (\"For the generated stories ...\")\n mean_ug_prblst_ori = eval_modelObj.get_rareword_usage(df_modelObj)\n mean_ug_ori = statistics.mean(mean_ug_prblst_ori)\n print (\"The average unigram probability is {}\".format(mean_ug_ori))", "def preprocess(path):\n \"\"\"Load the dictionary and the tokenizer.\"\"\"\n with open(('aux_files/enc_dic_%s_%d_%d_%s.pkl' % (FLAGS.data, MAX_VOCAB_SIZE, FLAGS.sn, FLAGS.sigma)), 'rb') as f:\n enc_dic = pickle.load(f)\n with open(('aux_files/tokenizer_%s_%d.pkl' % (FLAGS.data, MAX_VOCAB_SIZE)), 'rb') as f:\n tokenizer = pickle.load(f)\n\n \"\"\"We only use the original sequence `train_seq_o` and `test_seq_o`\"\"\"\n train_seq, train_seq_o, train_labels = encode_utils.text_encode(tokenizer, enc_dic, FLAGS.data+'/train', MAX_VOCAB_SIZE)\n test_seq, test_seq_o, test_labels = encode_utils.text_encode(tokenizer, enc_dic, FLAGS.data+'/test', MAX_VOCAB_SIZE)\n\n \"\"\"If use adversarial training method, add the adversarial samples to the original data:\"\"\"\n if FLAGS.adv:\n # Load adversarial samples.\n adv_train_seq_o, adv_train_labels = encode_utils.adv_text_encode(tokenizer, enc_dic, FLAGS.data, FLAGS.nn_type, MAX_VOCAB_SIZE)\n train_seq_o.extend(adv_train_seq_o)\n train_labels.extend(adv_train_labels)\n print('Adversarial Training, and extend the data.')\n\n \"\"\"Load the embedding matrix, and pad sequence to the same length\"\"\"\n embedding_matrix = np.load(('aux_files/embeddings_glove_%s_%d.npy' %(FLAGS.data, MAX_VOCAB_SIZE)))\n max_len = 250\n x_train = pad_sequences(train_seq_o, maxlen=max_len, 
padding='post')\n y_train = np.array(train_labels)\n x_test = pad_sequences(test_seq_o, maxlen=max_len, padding='post')\n y_test = np.array(test_labels)\n\n print('Training data: %d' % len(y_train))\n\n return x_train, y_train, x_test, y_test, embedding_matrix", "def beam_decode(model, src, src_mask, src_lengths, max_len=100, sos_index=1, eos_index=None, beam_size=5):\n\n with torch.no_grad():\n encoder_hidden, encoder_final = model.encode(src, src_mask, src_lengths)\n\n output = []\n hidden = None\n\n i = 0\n beam_nodes = []\n beam_nodes.append(BeamNode(sos_index, hidden, 0))\n ended = False #Flag raised when EOS token found\n while i<max_len and not ended:\n new_nodes = []\n for node in beam_nodes:\n prev_word = node.prev_input\n prev_y = torch.ones(1, 1).fill_(prev_word).type_as(src)\n trg_mask = torch.ones_like(prev_y)\n hidden = node.prev_h\n with torch.no_grad():\n out, hidden, pre_output = model.decode(\n encoder_hidden, encoder_final, src_mask,\n prev_y, trg_mask, hidden)\n\n # we predict from the pre-output layer, which is\n # a combination of Decoder state, prev emb, and context\n prob = model.generator(pre_output[:, -1])\n\n probs, words = torch.topk(prob, beam_size, dim=1)\n #print(probs, words)\n probs = probs.squeeze().cpu().numpy()\n words = words.squeeze().cpu().numpy()\n #print([lookup_words(x, TRG.vocab) for x in words])\n# print(lookup_words(words, TRG.vocab))\n #print(probs)\n #print(words)\n for j in range(len(probs)):\n #print(j)\n probj = probs[j]\n next_word = words[j]\n #print(probi)\n #print(wordi)\n new_words = node.words.copy() + [next_word]\n new_prob = node.logProb + probj\n new_node = BeamNode(next_word, hidden, new_prob, words=new_words, attention_scores=node.attention_scores.copy())\n new_node.attention_scores.append(model.decoder.attention.alphas.cpu().numpy())\n new_nodes.append(new_node)\n i+=1\n #print(\"first\", len(beam_nodes))\n beam_nodes = sorted(new_nodes, key=lambda node: -node.logProb)[:beam_size] \n #print(lookup_words([n.prev_input for n in beam_nodes], TRG.vocab))\n #print([n.logProb for n in beam_nodes])\n #print([n.logProb for n in beam_nodes])\n #print(len(beam_nodes))\n ended = any([True if node.prev_input==eos_index else False for node in beam_nodes])\n #print(ended)\n output = []\n attns = []\n if ended:\n end_node_i = [1 if node.prev_input==eos_index else 0 for node in beam_nodes].index(1)\n end_node = beam_nodes[end_node_i]\n output = np.array(end_node.words[:-1])\n else:\n end_node = beam_nodes[0]\n output = np.array(end_node.words)\n #print(end_node.attention_scores) \n #print(np.array(end_node.attention_scores).shape) \n #print([x.shape for x in end_node.attention_scores])\n #print(output)\n return output, np.concatenate(np.array(end_node.attention_scores), axis=1)", "def preprocess_txt(txt, word_index, max_txt_size=255):\n wd_list = listify_txt(txt)\n encoded = [1]\n for word in wd_list:\n if word in word_index:\n encoded.append(word_index[word])\n else:\n encoded.append(word_index[\"<UNK>\"]) \n encoded = keras.preprocessing.sequence.pad_sequences([encoded], value=word_index[\"<PAD>\"], padding=\"post\", maxlen=max_txt_size)[0]\n return encoded", "def _generateUnigrams(self,text):\n self.unigrams=self._generateNgrams(text,1)", "def setUp(self):\n super().setUp()\n self._embedding_size = 50\n self._model = BinarySkipGram(\n vocabulary_size=self._graph.get_nodes_number(),\n embedding_size=self._embedding_size\n )\n self.assertEqual(\"BinarySkipGram\", self._model.name)\n self._model.summary()", "def test_no_ngrams():\n tokenizer = 
Tokenizer(quadgram_freq=2)\n X = tokenizer.transform([[\"a b c d\"]])\n assert X[\"corpus\"][0] == [\"a\", \"b\", \"c\", \"d\"]\n assert tokenizer.quadgrams is None", "def _index_skipgrams(self) -> None:\n for skipgram in self.skipgrams:\n self.skipgram_index[skipgram.string] += [skipgram]\n for skipgram in self.skipgrams_lower:\n self.skipgram_index_lower[skipgram.string] += [skipgram]", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n\n ### YOUR CODE HERE\n def enterDic(phrase, dict):\n if phrase in dict:\n dict[phrase] += 1\n else:\n dict[phrase] = 1\n\n unigram_counts[word_to_num['UUUNKKK']] = 0\n\n for sentence in dataset:\n enterDic(sentence[1], unigram_counts) # count number of start of sentences\n enterDic((sentence[0], sentence[1]), bigram_counts) # count number of start of sentences\n token_count += 2\n for i in range(2, len(sentence)):\n token_count += 1\n enterDic(sentence[i], unigram_counts)\n enterDic((sentence[i - 1], sentence[i]), bigram_counts)\n enterDic((sentence[i - 2], sentence[i - 1], sentence[i]), trigram_counts)\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--file', '-f', type=str, help='path to corpus file', default='./train')\n args = parser.parse_args()\n\n corpus_reader = CorpusReader(args.file)\n model = BigramModel(corpus_reader.sents())\n\n test_sentences = ['Suggestive, Watson, is it not?',\n 'It is amazing that a family can be torn apart by something as simple as a pack of wild dogs!',\n 'So spoke Sherlock Holmes and turned back to the great scrapbook in which he was arranging and indexing some of his recent material.',\n 'What I like best about my friends is that they are few.',\n 'Friends what is like are they about I best few my that.']\n\n # prints two paragraphs with each five sentences\n for _ in range(2):\n print(generate(model, 5) + '\\n')\n\n # for each sentence in the test_sentences print the perplexity\n for sentence in test_sentences:\n print(model.perplexity(nltk.word_tokenize(sentence)))", "def create_train_model(self):\n st = LancasterStemmer()\n with open(self.data_path, encoding='utf8') as f_name:\n sentences = [[st.stem(w) for w, t in pos_tag(line.lower().split()) if 'N' in t] for line in f_name]\n sentences = [filter(lambda x: len(x) > 2, (word.strip(punctuation) for word in sentences)) for sent in sentences]\n model = Word2Vec(sentences,\n min_count=self.min_count,\n size=self.size,\n window=self.window,\n workers=4)\n model.save(self.model_path)", "def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int):\n banned_tokens = [\n torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size)\n ]\n if step + 2 - self.no_repeat_ngram_size >= 0:\n cpu_tokens: List[List[int]] = tokens.cpu().tolist()\n check_start_pos = step + 2 - self.no_repeat_ngram_size\n for bbsz_idx in range(bsz * beam_size):\n ngram_to_check = cpu_tokens[bbsz_idx][\n -(self.no_repeat_ngram_size - 1) :\n ]\n for i in range(check_start_pos):\n if (\n ngram_to_check\n == cpu_tokens[bbsz_idx][i : i + self.no_repeat_ngram_size - 1]\n ):\n banned_tokens[bbsz_idx].append(\n cpu_tokens[bbsz_idx][i + self.no_repeat_ngram_size - 1]\n )\n for bbsz_idx in range(bsz * beam_size):\n lprobs[bbsz_idx][\n torch.tensor(banned_tokens[bbsz_idx], dtype=torch.int64)\n ] = torch.tensor(-math.inf).to(lprobs)\n return lprobs", "def run(args):\n ####################\n # Data 
Processing #\n ####################\n\n words, counts = load_input(args.train_path)\n trie = marisa_trie.Trie(counts.keys())\n indices = load_indices('word', words, counts)\n args.nb_classes = len(indices.keys())\n print(len(indices.keys()))\n timeseries = make_embedding(args.train_path, words, indices)\n\n ###################\n # Construct model #\n ###################\n\n if os.path.exists(name+'.h5'):\n model = keras_load_model(name+'.h5')#', custom_objects={'perplexity': perplexity})\n model.summary()\n else:\n model = densenet.DenseNet(args.nb_classes,\n args.img_dim,\n args.depth,\n args.nb_dense_block,\n args.growth_rate,\n args.nb_filter,\n dropout_rate=args.dropout_rate,\n weight_decay=args.weight_decay)\n # Model output\n model.summary()\n\n # Build optimizer\n opt = Nadam(lr=args.learning_rate)\n\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['mae', 'accuracy'])\n\n ####################\n # Network training #\n ####################\n\n train(model, timeseries, indices, words, args)\n\n #words, counts = load_input(args.test_path)\n #timeseries = make_embedding(args.test_path, words, indices)\n #trie = load_trie(counts)\n #charinds = load_indices('char')\n #char_to_bpc(model, timeseries, indices, charinds, words, args, trie) #cel\n #word_to_perplexity(model, timeseries, indices, words, args) #nll", "def generate_text(model, field, seed, n=500):\n string = seed\n for i in range(n):\n indexes = field.numericalize(string)\n predictions = model(indexes.transpose(0, 1))\n last_output = predictions[-1]\n [most_probable] = torch.multinomial(last_output.exp(), 1)\n char = field.vocab.itos[most_probable]\n seed = seed[1:] + char\n string += char\n return string", "def test_text_classifier_del_training_samples(self):\n pass", "def generate_text(session, model, config, starting_text='<eos>',\n stop_length=100, stop_tokens=None, temp=1.0):\n state = model.initial_state.eval()\n # Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [model.vocab.encode(word) for word in starting_text.split()]\n for i in xrange(stop_length):\n ### YOUR CODE HERE\n #print tokens\n feed = {}\n #x = np.array([tokens[-1]])\n #x.reshape(1,1)\n feed[model.input_placeholder] = [[tokens[-1]]]\n feed[model.dropout_placeholder] = 1\n feed[model.initial_state] = state\n y_pred, state = session.run([model.predictions[-1], model.final_state], feed_dict=feed)\n ### END YOUR CODE\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [model.vocab.decode(word_idx) for word_idx in tokens]\n return output", "def sample(args):\n with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:\n saved_args = cPickle.load(f)\n with open(os.path.join(args.save_dir, 'words_vocab.pkl'), 'rb') as f:\n words, vocab = cPickle.load(f)\n tf.reset_default_graph()\n model = Model(saved_args, True)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver(tf.global_variables())\n ckpt = tf.train.get_checkpoint_state(args.save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n for _ in range(args.count):\n output = model.sample(sess, words, vocab, args.n, args.prime,\n args.sample, args.pick,\n args.width, args.quiet)\n score, matches = eval_str(output)\n print(\"===== Before GTranslate Smoothing. 
Grammar Score = %i\" %score)\n print(output)\n gtranslate_output = translate(output)\n new_score, new_matches = eval_str(gtranslate_output)\n print(\"===== After GTranslate Smoothing. Grammar Score = %i\" %new_score)\n print(translate(gtranslate_output))\n if args.show_grammar:\n for err in matches:\n print(\"---\")\n print(err)", "def frontend(text):\n text = pyopenjtalk.g2p(text, kana=False)\n print(f\"Cleaned text: {text}\")\n charseq = text.split(\" \")\n idseq = []\n for c in charseq:\n if c.isspace():\n idseq += [char_to_id[\"<space>\"]]\n elif c not in char_to_id.keys():\n idseq += [char_to_id[\"<unk>\"]]\n else:\n idseq += [char_to_id[c]]\n idseq += [idim - 1] # <eos>\n return torch.LongTensor(idseq).view(-1).to(device)", "def process_sentence(sentence, skip_window):\n batch = []\n labels = []\n for pos, word in enumerate(sentence):\n # now go over all words from the window, predicting each one in turn\n start = max(0, pos - skip_window)\n # enumerate takes a second arg, which sets the starting point, this makes pos and pos2 line up\n for pos2, word2 in enumerate(sentence[start: pos + skip_window + 1], start):\n if pos2 != pos:\n batch.append(word)\n labels.append([word2])\n return batch, labels", "def test_text_task(self):\n args = BASE_ARGS.copy()\n args.update(TEXT_ARGS)\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 1.5, 'failed to train image_seq2seq on text task'\n )", "def test_word2vec():\n dataset = type('dummy', (), {})()\n def dummySampleTokenIdx():\n return random.randint(0, 4)\n\n def getRandomContext(C):\n tokens = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n return tokens[random.randint(0,4)], \\\n [tokens[random.randint(0,4)] for i in xrange(2*C)]\n dataset.sampleTokenIdx = dummySampleTokenIdx\n dataset.getRandomContext = getRandomContext\n\n random.seed(31415)\n np.random.seed(9265)\n dummy_vectors = normalizeRows(np.random.randn(10,3))\n dummy_tokens = dict([(\"a\",0), (\"b\",1), (\"c\",2),(\"d\",3),(\"e\",4)])\n print \"==== Gradient check for skip-gram ====\"\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n print \"\\n==== Gradient check for CBOW ====\"\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n\n print \"\\n=== Results ===\"\n print skipgram(\"c\", 3, [\"a\", \"b\", \"e\", \"d\", \"b\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)\n print skipgram(\"c\", 1, [\"a\", \"b\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient)\n print cbow(\"a\", 2, [\"a\", \"b\", \"c\", \"a\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)\n print cbow(\"a\", 2, [\"a\", \"b\", \"a\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient)", "def test_word2vec():\n dataset = type('dummy', (), {})()\n def dummySampleTokenIdx():\n return random.randint(0, 4)\n\n def getRandomContext(C):\n tokens = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n return tokens[random.randint(0,4)], \\\n [tokens[random.randint(0,4)] for i in xrange(2*C)]\n dataset.sampleTokenIdx = 
dummySampleTokenIdx\n dataset.getRandomContext = getRandomContext\n\n random.seed(31415)\n np.random.seed(9265)\n dummy_vectors = normalizeRows(np.random.randn(10,3))\n dummy_tokens = dict([(\"a\",0), (\"b\",1), (\"c\",2),(\"d\",3),(\"e\",4)])\n print \"==== Gradient check for skip-gram ====\"\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n print \"\\n==== Gradient check for CBOW ====\"\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n\n print \"\\n=== Results ===\"\n print skipgram(\"c\", 3, [\"a\", \"b\", \"e\", \"d\", \"b\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)\n print skipgram(\"c\", 1, [\"a\", \"b\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient)\n print cbow(\"a\", 2, [\"a\", \"b\", \"c\", \"a\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)\n print cbow(\"a\", 2, [\"a\", \"b\", \"a\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient)", "def test_word2vec():\n dataset = type('dummy', (), {})()\n def dummySampleTokenIdx():\n return random.randint(0, 4)\n\n def getRandomContext(C):\n tokens = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n return tokens[random.randint(0,4)], \\\n [tokens[random.randint(0,4)] for i in xrange(2*C)]\n dataset.sampleTokenIdx = dummySampleTokenIdx\n dataset.getRandomContext = getRandomContext\n\n random.seed(31415)\n np.random.seed(9265)\n dummy_vectors = normalizeRows(np.random.randn(10,3))\n dummy_tokens = dict([(\"a\",0), (\"b\",1), (\"c\",2),(\"d\",3),(\"e\",4)])\n print \"==== Gradient check for skip-gram ====\"\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n print \"\\n==== Gradient check for CBOW ====\"\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n\n print \"\\n=== Results ===\"\n print skipgram(\"c\", 3, [\"a\", \"b\", \"e\", \"d\", \"b\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)\n print skipgram(\"c\", 1, [\"a\", \"b\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient)\n print cbow(\"a\", 2, [\"a\", \"b\", \"c\", \"a\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)\n print cbow(\"a\", 2, [\"a\", \"b\", \"a\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient)", "def main():\n\n gpu_id = 1\n d_batch = 64\n d_embed = 256\n d_hidden = 256\n d_image_size = 256\n device = torch.device(f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu')\n dataset, train_loader = get_default_flickr30k_loader(d_batch=d_batch, d_image_size=d_image_size)\n model = 
Img2Txt(dataset.d_vocab, d_embed, d_hidden, dataset.start_token, dataset.end_token).to(device)\n\n train(model, dataset, train_loader, device)", "def go(arg):\n # load the IMDB data\n if arg.final:\n train, test = datasets.IMDB.splits(TEXT, LABEL)\n\n TEXT.build_vocab(train, max_size=arg.vocab_size - 2)\n LABEL.build_vocab(train)\n\n train_iter, test_iter = data.BucketIterator.splits((train, test), batch_size=arg.batch_size,\n device=d())\n else:\n tdata, _ = datasets.IMDB.splits(TEXT, LABEL)\n train, test = tdata.split(split_ratio=0.8)\n\n TEXT.build_vocab(train, max_size=arg.vocab_size - 2) # - 2 to make space for <unk> and <pad>\n LABEL.build_vocab(train)\n\n train_iter, test_iter = data.BucketIterator.splits((train, test), batch_size=arg.batch_size,\n device=d())\n\n print(f'- nr. of training examples {len(train_iter)}')\n print(f'- nr. of {\"test\" if arg.final else \"validation\"} examples {len(test_iter)}')\n\n if arg.max_length < 0:\n mx = max([input.text[0].size(1) for input in train_iter])\n mx = mx * 2\n print(f'- maximum sequence length: {mx}')\n else:\n mx = arg.max_length\n\n # create the model\n model = Transformer(k=arg.dim_model, heads=arg.num_heads, depth=arg.depth,\n num_tokens=arg.vocab_size, num_classes=NUM_CLS)\n use_cuda = torch.npu.is_available() and not arg.cpu\n device = torch.device(f'npu:{NPU_CALCULATE_DEVICE}')\n\n model = model.to(f'npu:{NPU_CALCULATE_DEVICE}')\n\n opt = Adam(params=model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, amsgrad=False)\n sch = torch.optim.lr_scheduler.LambdaLR(opt, lambda i: min(i / (arg.lr_warmup / arg.batch_size), 1.0))\n\n # training loop\n seen = 0\n for e in range(arg.num_epochs):\n\n print(f'\\n epoch {e}')\n model.train(True)\n for batch in tqdm.tqdm(train_iter):\n\n opt.zero_grad()\n\n input = batch.text[0].to(f'npu:{NPU_CALCULATE_DEVICE}')\n label = batch.label - 1\n label = label.to(f'npu:{NPU_CALCULATE_DEVICE}')\n\n if input.size(1) > mx:\n input = input[:, :mx]\n out = model(input)\n loss = F.nll_loss(out, label)\n\n loss.backward()\n\n # clip gradients\n # - If the total gradient vector has a length > 1, we clip it back down to 1.\n if arg.gradient_clipping > 0.0:\n nn.utils.clip_grad_norm_(model.parameters(), arg.gradient_clipping)\n\n opt.step()\n sch.step()\n\n seen += input.size(0)\n # tbw.add_scalar('classification/train-loss', float(loss.item()), seen)\n\n with torch.no_grad():\n\n model.train(False)\n tot, cor = 0.0, 0.0\n\n for batch in test_iter:\n\n input = batch.text[0]\n label = batch.label - 1\n\n if input.size(1) > mx:\n input = input[:, :mx]\n out = model(input).argmax(dim=1)\n\n tot += float(input.size(0))\n cor += float((label == out).sum().item())\n\n acc = cor / tot\n print(f'-- {\"test\" if arg.final else \"validation\"} accuracy {acc:.3}')\n # tbw.add_scalar('classification/test-loss', float(loss.item()), e)\n for batch in test_iter:\n input = batch.text[0]\n label = batch.label - 1\n\n if input.size(1) > mx:\n input = input[:, :mx]\n print(input)", "def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()", "def ner(text = None, dist=False):\n r = []\n if text != None:\n r = requests.post(\"https://api.nb.no/ngram/ner\", json={'text':text,'dist':dist})\n return r.json()", "def train(\n train_texts: 
List[str],\n train_labels: List[str],\n pretrain_params: Any = None) -> Any:\n train_texts = preprocessing(train_texts)\n train_tokenized_texts = text_to_tokens(train_texts)\n\n train_pos = [train_tokenized_texts[i] for i in range(len(train_labels)) if train_labels[i] == 'pos']\n train_neg = [train_tokenized_texts[i] for i in range(len(train_labels)) if train_labels[i] == 'neg']\n \n cnt_pos_docs = len(train_pos)\n cnt_neg_docs = len(train_neg)\n\n\n all_words_freq = defaultdict(int)\n all_words = set()\n\n pos_dict = defaultdict(int)\n neg_dict = defaultdict(int)\n sum_len_pos = 0\n sum_len_neg = 0\n\n for text in train_pos:\n for token in text:\n all_words.add(token)\n all_words_freq[token] += text[token]\n pos_dict[token] += text[token]\n sum_len_pos += text[token]\n \n for text in train_neg:\n for token in text:\n all_words.add(token)\n all_words_freq[token] += text[token]\n neg_dict[token] += text[token]\n sum_len_neg += text[token]\n \n alpha = 1 #For additive smoothing\n M = len(all_words)\n sum_len = 0\n print(\"____________\")\n print(\"Sum of text lens\", sum_len)\n print(\"____________\")\n print(\"Words quantity\", M)\n print(\"____________\")\n\n token_probs_pos = defaultdict(int)\n token_probs_neg = defaultdict(int)\n print(\"Calculate probablity for\", M, \"tokens\")\n\n i = 0\n for token in all_words:\n if (i % 5000 == 0):\n print(\"__________\")\n print(\"Calculated\", i, \"tokens\")\n print(\"__________\")\n token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)\n token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)\n i += 1\n \n return {\n \"token_probs_pos\": token_probs_pos,\n \"token_probs_neg\": token_probs_neg,\n \"all_words\": all_words,\n \"sum_len_pos\": sum_len_pos,\n \"sum_len_neg\": sum_len_neg,\n \"cnt_pos_docs\": cnt_pos_docs,\n \"cnt_neg_docs\": cnt_pos_docs,\n \"pos_dict\": pos_dict,\n \"neg_dict\": neg_dict\n }", "def sample_beam(model, input_embedding, char2idx, idx2char, k=5, maxlen=30,\n start='START', use_head=True):\n with torch.no_grad():\n device = input_embedding.device\n softmax = nn.Softmax(dim=1)\n if use_head:\n input_embedding = input_embedding.view(1, -1)\n\n inp = [torch.LongTensor([char2idx[start]]).to(device)]\n inp = nn.utils.rnn.pack_sequence(inp)\n out, hidden = model(input_embedding, inp, use_head=use_head)\n\n out = softmax(out.data).view(-1).cpu().numpy()\n max_k = np.argsort(out)[-k:][::-1]\n oldprobs = out[max_k]\n words = [[i] for i in max_k]\n inp = pack([torch.LongTensor([j]).to(device) for j in max_k])\n\n if model.mode == 'LSTM':\n hidden0 = torch.cat([hidden[0] for i in range(k)], dim=1)\n hidden1 = torch.cat([hidden[1] for i in range(k)], dim=1)\n hidden = hidden0, hidden1\n else:\n hidden = torch.cat([hidden for i in range(k)], dim=1)\n WORDS = []\n for c in range(maxlen):\n out, hidden = model(hidden, inp, use_head=False)\n out = softmax(out.data).cpu().numpy()\n\n #print(out.shape)\n inpnp = inp.data.detach().cpu().numpy()\n done = np.where(inpnp == char2idx['END'])\n out[done] = 0\n if len(out[done]) != 0:\n #print(out[done].shape)\n for d in done[0]:\n out[d][char2idx['END']] = 1\n #print(done)\n\n #print(out)\n #print(out[done])\n out = (oldprobs.reshape(-1, 1)*out)\n max_k = np.argsort(out)[:, -k:][:, ::-1]\n\n #print(max_k)\n probs = np.array([out[i][max_k[i]] for i in range(k)])\n #print(probs)\n flat = probs.reshape(-1)\n max_k2 = np.argsort(flat)[::-1][:k]\n word_inds = max_k2//k\n next_chars_inds = max_k2%k\n\n oldprobs = flat[max_k2]\n #print(oldprobs)\n\n 
new_words = []\n new_inp = []\n for i, word_ind in enumerate(word_inds):\n next_char = max_k[word_ind][next_chars_inds[i]]\n if next_char == char2idx['END']:\n #print(\"HIT AN END at word {}\".format(word_ind))\n WORDS.append((words[word_ind], oldprobs[i]))\n #the_word = words[word_ind]\n #return ''.join([idx2char[i] for i in the_word])\n new_inp.append(torch.LongTensor([next_char]).to(device))\n word = words[word_ind][:]\n word = word + [next_char]\n new_words.append(word)\n words = new_words[:]\n\n if model.mode == 'LSTM':\n h1, h2 = hidden\n h1, h2 = h1[0][word_inds].view(1, k, -1), h2[0][word_inds].view(1, k, -1)\n hidden = h1, h2\n else:\n hidden = hidden[0][word_inds].view(1, k, -1)\n\n\n inp = pack(new_inp)\n\n return [''.join([idx2char[i] for i in word if i != char2idx['END']]) for word in words], oldprobs", "def pretrain(texts_list: List[List[str]]) -> Any:\n \n return None", "def main(args: List[str]):\n argv = {a.split('=')[0]: a.split('=')[1] for a in args[1:]}\n\n load_path = argv.get('load_path', None)\n assert load_path, \"No load_path specified\"\n\n batch_size = int(argv.get('batch_size', 128))\n\n device = argv.get('device', None)\n\n text_path = argv.get('text_path', None)\n\n hashtag_analysis = argv.get('hashtag', 'true').lower()\n assert hashtag_analysis in ['true', 'false']\n hashtag_analysis = False if 'f' in hashtag_analysis else True\n\n fast_tokenizer = argv.get('fast_tokenizer', 'false').lower()\n assert fast_tokenizer in ['true', 'false']\n fast_tokenizer = False if 'f' in fast_tokenizer else True\n\n if text_path is None:\n data_path = get_data_path()\n _text_path = Path(data_path, 'test_data.txt')\n if _text_path.is_file():\n text_path = _text_path\n else:\n logger.error(\"No text_path specified\")\n exit(0)\n\n logger.info(f\"Predicting sentiment from data inside {text_path}\")\n\n if not hashtag_analysis:\n trans_predict = TransformersPredict(load_path=load_path, text_path=text_path, device=device,\n fast_tokenizer=fast_tokenizer)\n else:\n freq_threshold = int(argv.get('hashtag_freq', 500))\n prob_threshold = float(argv.get('hashtag_prob', 0.7))\n trans_predict = TransformersPredictWithHashtag(load_path=load_path, text_path=text_path, device=device,\n fast_tokenizer=fast_tokenizer,\n freq_threshold=freq_threshold,\n prob_threshold=prob_threshold)\n trans_predict.predict(batch_size=batch_size)\n trans_predict.submissionToFile()", "def data_process(raw_text_iter: dataset.IterableDataset) -> Tensor:\n data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]\n return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))", "def main():\n print('# load tokenizer')\n set_seed(SEED)\n tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)\n\n print('# load dataset')\n (train_texts, train_labels, valid_texts, valid_labels) = prepare_dataset()\n result = {\n 'train': '{} : {} : {}'.format(len([x for x in train_labels if x == 0]), len([x for x in train_labels if x == 1]), len([x for x in train_labels if x == 2])),\n 'test': '{} : {} : {}'.format(len([x for x in valid_labels if x == 0]), len([x for x in valid_labels if x == 1]), len([x for x in valid_labels if x == 2]))\n }\n print(result['train'])\n print(result['test'])\n train_encodings = tokenizer(train_texts, truncation=True, padding=True, max_length=MAX_LENGTH)\n valid_encodings = tokenizer(valid_texts, truncation=True, padding=True, max_length=MAX_LENGTH)\n\n print('# convert tokenized data into a torch Dataset')\n train_dataset = 
SentimentDataset(train_encodings, train_labels)\n valid_dataset = SentimentDataset(valid_encodings, valid_labels)\n\n print('# load model & move to GPU')\n if MODEL_NAME == 'ufal/robeczech-base':\n model = RobertaForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=len(TARGET_NAMES)).to(\"cuda\")\n elif MODEL_NAME == 'Seznam/small-e-czech':\n model = ElectraForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=len(TARGET_NAMES)).to(\"cuda\")\n else:\n model = BertForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=len(TARGET_NAMES)).to(\"cuda\")\n\n print('# set training params')\n w_steps = int((EPOCHS * len(train_texts)) / (3 * BATCH_DEV * VIS_DEV))\n training_args = TrainingArguments(\n output_dir=OUTPUT_DIR, # output directory\n num_train_epochs=EPOCHS, # total number of training epochs\n per_device_train_batch_size=BATCH_DEV, # batch size per device during training\n per_device_eval_batch_size=BATCH_DEV, # batch size for evaluation\n warmup_steps=w_steps, # number of warmup steps for learning rate scheduler\n weight_decay=0.01, # strength of weight decay\n logging_dir='./logs', # directory for storing logs\n load_best_model_at_end=True, # load the best model when finished training (default metric is loss)\n metric_for_best_model='f1',\n logging_steps=100, # log & save weights each logging_steps\n evaluation_strategy=\"steps\", # evaluate each `logging_steps`\n learning_rate=5e-5,\n save_total_limit=3,\n disable_tqdm=True\n )\n trainer = Trainer(\n model=model, # the instantiated Transformers model to be trained\n args=training_args, # training arguments, defined above\n train_dataset=train_dataset, # training dataset\n eval_dataset=valid_dataset, # Yeah, I know this is bad.. but this is just an example.\n compute_metrics=compute_metrics, # the callback that computes metrics of interest\n callbacks=[EarlyStoppingCallback(early_stopping_patience=20)]\n )\n\n print('# train')\n trainer.train()\n print('#################### eval')\n print(trainer.evaluate())\n\n print('#NOT save to disk')\n #model.save_pretrained(MODEL_SAVE_PATH)\n #tokenizer.save_pretrained(MODEL_SAVE_PATH)\n\n def get_probs(text):\n inputs = tokenizer(text, padding=True, truncation=True, max_length=MAX_LENGTH, return_tensors=\"pt\").to(\"cuda\")\n outputs = model(**inputs)\n return outputs[0].softmax(1)\n\n predictions = np.array([get_probs(valid_texts[i]).cpu().detach().numpy()[0] for i in range(len(valid_texts))])\n print('##################### F1 #########################')\n f1 = f1_score(valid_labels, np.argmax(predictions, -1), average='macro')\n print(f1)\n cfm = confusion_matrix(valid_labels, np.argmax(predictions, -1)).tolist()\n print(cfm)\n\n results_pth = Path('{}.json'.format(MODEL_NAME.split('/')[1]))\n with open(results_pth, 'w', encoding='utf-8') as outfile:\n result['f1'] = f1\n result['confusion_matrix'] = cfm\n json.dump(result, outfile, ensure_ascii=False)\n\n rmdir(OUTPUT_DIR)\n del model\n del tokenizer\n del trainer\n # torch.cuda.empty_cache()\n # torch.cuda.synchronize()\n print('Done: ' + str(results_pth))", "def pegasus_eval(text, params):\n model, tokenizer, torch_device = params\n batch = tokenizer.prepare_seq2seq_batch([text], truncation=True, padding='longest', return_tensors=\"pt\").to(torch_device)\n translated = model.generate(**batch)\n output = tokenizer.batch_decode(translated, skip_special_tokens=True)[0]\n output = output.replace('<n>', ' ')\n return output", "def tf_train_seq_data_proc(num_txt, batch_size, max_seq):\r\n x_and_y = 
tf.data.Dataset.from_tensor_slices(num_txt).apply(tf.contrib.data.batch_and_drop_remainder(max_seq + 1))\r\n x_y_mapped = x_and_y.map(sep_x_y_words)\r\n output = x_y_mapped.shuffle(10000).apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\r\n return output", "def n_grams_predict_next(name: str,\n file_name: Optional[str] = None,\n model: Dict[str, NGramsSequence] = None,\n clean_input_file: Optional[str] = None,\n clean_input_data: Optional[pd.DataFrame] = None,\n num_lines_predict: Optional[int] = None,\n n_grams: int = default_n_grams,\n num_predict: int = 1,\n smoothing: SmoothingType = SmoothingType.basic) -> None:\n\n logger.success(f'predicting with {smoothing.name} for {name}')\n\n if file_name is None and model is None:\n raise ValueError('no file name or model provided')\n\n if clean_input_file is None and clean_input_data is None:\n raise ValueError('no input file name or data provided')\n\n # create n-gram model if not provided\n if model is None:\n json_file_path = file_path_relative(f'{models_folder}/{file_name}')\n logger.info(f'reading data from {json_file_path}')\n with open(json_file_path, 'r') as file:\n model = NGramsModel.from_json(json.load(file))\n\n # get testing data\n if clean_input_data is None:\n file_path = file_path_relative(\n f'{clean_data_folder}/{clean_input_file}')\n logger.info(f'reading data from {file_path}')\n clean_input_data = pd.read_csv(file_path, converters={\n sentences_key: literal_eval})\n\n predict_sentences: List[List[str]] = clean_input_data[sentences_key]\n if num_lines_predict is not None:\n predict_sentences = predict_sentences[:num_lines_predict]\n\n check_probability_smoothing: List[SmoothingType] = [SmoothingType.basic]\n\n logger.success('[[<words>]] = predicted words:')\n\n sum_probability_log: float = 0.\n count_all_predict: int = 0\n\n # iterate over testing data\n for i, sentence in enumerate(predict_sentences):\n full_sentence = sentence.copy()\n for _ in range(num_predict):\n last_words = full_sentence[-n_grams:]\n sequence = ' '.join(last_words)\n\n probabilities = model.get_probabilities(\n sequence, smoothing)\n sum_probability = sum(elem[1] for elem in probabilities)\n # logger.info(f'probabilities: sum: {sum_probability}, all: {probabilities}')\n if smoothing in check_probability_smoothing:\n # for not-unseen outputs, check to\n # make sure sum is approximately 1\n assert np.isclose(\n sum_probability, 1), f'probability of {sum_probability} is not close to 1'\n\n current_output, prob = probabilities[0]\n full_sentence.append(current_output)\n # if not unseen, add to perplexity calculation\n if current_output != unseen_output:\n sum_probability_log += np.log(prob)\n count_all_predict += 1\n\n logger.info(\n f\"{i + 1}. 
{' '.join(sentence)} [[{' '.join(full_sentence[len(sentence):])}]]\")\n\n if count_all_predict == 0:\n logger.info('no predictions, no perplexity')\n else:\n total_loss = -1 * sum_probability_log\n perplexity: float = np.exp(total_loss / count_all_predict)\n logger.info(f\"perplexity: {perplexity}\")", "def detect_phrases(tmp_dir_sent, tmp_dir_phrases, common_words, min_count, threshold, max_layers=2):\n\n streamer = DocStreamer(tmp_dir_sent)\n phrases = Phrases(streamer, common_terms=common_words, min_count=min_count, threshold=threshold)\n\n starting_layer, next_layer = 1, 2\n\n while next_layer <= max_layers:\n start_time = datetime.now()\n phrases, found_new = train_layer(streamer, phrases, starting_layer, next_layer)\n end_time = datetime.now()\n elasped = end_time - start_time\n print(\"Finished layer {}\".format(elasped.seconds))\n if not found_new:\n print(\"No new phrases found at layer {}\".format(next_layer))\n break\n else:\n print(\"New phrases detected at layer {}\".format(next_layer))\n starting_layer += 1\n next_layer += 1\n\n phrases.save(os.path.join(tmp_dir_phrases, \"phrases.model\"))\n phrase_counts = Counter()\n\n print(\"Exporting Phrase Counts\")\n\n current_layer = 0\n while current_layer <= max_layers:\n ngrams_stream = stream_ngrams(tmp_dir_sent, phrases, current_layer)\n ngrams_export = phrases.export_phrases(ngrams_stream)\n phrase_counts.update(ngrams_export)\n current_layer += 1\n\n print(\"Finished Exporting Phrase Counts\")\n\n phrase_counts = list(phrase_counts.items())\n decoded_phrase_counts = []\n for word_pmi, count in phrase_counts:\n word, pmi = word_pmi\n word = word.decode()\n decoded_phrase_counts.append((word, pmi, count))\n decoded_phrase_counts.sort(key=lambda x: x[2], reverse=True)\n del phrase_counts\n with open(PHRASE_DUMP, 'w+', encoding='utf-8') as tfile:\n for phrase, pmi, count in decoded_phrase_counts:\n tfile.write(\"{}, {}, {}\".format(phrase.replace(\" \", \"_\"), pmi, count))\n tfile.write(\"\\n\")", "def generate_limittedmodel():\r\n print('Loading model')\r\n model = KeyedVectors.load_word2vec_format(BIN_NAME, binary=True)\r\n print('Model loaded!')\r\n\r\n print('Loading dot products')\r\n dp = np.load(DP_NAME)\r\n print('Dot products loaded')\r\n\r\n print('Filtering vocab')\r\n for name, vocab in list(model.vocab.items()):\r\n if dp[vocab.index] < MAX_DEGREE:\r\n del model.vocab[name]\r\n\r\n il = list(model.vocab.items())\r\n print('Sorting vocab')\r\n il.sort(key=lambda x: x[1].index)\r\n\r\n # Find the indexes of the words that are being kept\r\n print('Generating indexes')\r\n indexes = []\r\n for i in range(0, len(il)):\r\n name, vocab = il[i]\r\n indexes.append(vocab.index)\r\n model.vocab[name].index = i\r\n\r\n print('Modifying model weights')\r\n model.syn0 = model.syn0[indexes]\r\n\r\n print('Saving file')\r\n model.save_word2vec_format(SAVE_NAME, binary=True)", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n ### YOUR CODE HERE\n raise NotImplementedError\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def test_word2vec():\n dataset = type('dummy', (), {})()\n def dummySampleTokenIdx():\n return random.randint(0, 4)\n\n def getRandomContext(C):\n tokens = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n return tokens[random.randint(0,4)], \\\n [tokens[random.randint(0,4)] for i in range(2*C)]\n dataset.sampleTokenIdx = dummySampleTokenIdx\n dataset.getRandomContext = getRandomContext\n\n random.seed(31415)\n 
np.random.seed(9265)\n dummy_vectors = normalizeRows(np.random.randn(10,3))\n dummy_tokens = dict([(\"a\",0), (\"b\",1), (\"c\",2),(\"d\",3),(\"e\",4)])\n print(\"==== Gradient check for skip-gram ====\")\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n print(\"\\n==== Gradient check for CBOW ====\")\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n\n print(\"\\n=== Results ===\")\n print(skipgram(\"c\", 3, [\"a\", \"b\", \"e\", \"d\", \"b\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset))\n print(skipgram(\"c\", 1, [\"a\", \"b\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient))\n print(cbow(\"a\", 2, [\"a\", \"b\", \"c\", \"a\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset))\n print(cbow(\"a\", 2, [\"a\", \"b\", \"a\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient))", "def test_word2vec():\n dataset = type('dummy', (), {})()\n def dummySampleTokenIdx():\n return random.randint(0, 4)\n\n def getRandomContext(C):\n tokens = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n return tokens[random.randint(0,4)], \\\n [tokens[random.randint(0,4)] for i in range(2*C)]\n dataset.sampleTokenIdx = dummySampleTokenIdx\n dataset.getRandomContext = getRandomContext\n\n random.seed(31415)\n np.random.seed(9265)\n dummy_vectors = normalizeRows(np.random.randn(10,3))\n dummy_tokens = dict([(\"a\",0), (\"b\",1), (\"c\",2),(\"d\",3),(\"e\",4)])\n print(\"==== Gradient check for skip-gram ====\")\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n skipgram, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n print(\"\\n==== Gradient check for CBOW ====\")\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient),\n dummy_vectors)\n gradcheck_naive(lambda vec: word2vec_sgd_wrapper(\n cbow, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient),\n dummy_vectors)\n\n print(\"\\n=== Results ===\")\n print(skipgram(\"c\", 3, [\"a\", \"b\", \"e\", \"d\", \"b\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset))\n print(skipgram(\"c\", 1, [\"a\", \"b\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient))\n print(cbow(\"a\", 2, [\"a\", \"b\", \"c\", \"a\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset))\n print(cbow(\"a\", 2, [\"a\", \"b\", \"a\", \"c\"],\n dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset,\n negSamplingCostAndGradient))", "def train(self, data):\r\n pos_list = [\r\n \"adj\",\r\n \"adv\",\r\n \"adp\",\r\n \"conj\",\r\n \"det\",\r\n \"noun\",\r\n \"num\",\r\n \"pron\",\r\n \"prt\",\r\n \"verb\",\r\n \"x\",\r\n \".\",\r\n ]\r\n print(\"Train\")\r\n\r\n wordpos_list = [\r\n tuple([line[0][i], line[1][i]])\r\n for line in data\r\n for i in range(len(line[0]))\r\n ]\r\n\r\n pos_dict = 
{pos: {} for pos in pos_list}\r\n\r\n wordpos_count = Counter(wordpos_list)\r\n\r\n for w in wordpos_count:\r\n pos_dict[w[1]].update({w[0]: wordpos_count[w]})\r\n posterior_prob = {}\r\n emission_prob = {}\r\n for pos in pos_dict.keys():\r\n posterior_prob[pos] = float(sum(pos_dict[pos].values())) / len(wordpos_list)\r\n for pos in pos_dict.keys():\r\n emission_prob[pos] = {\r\n word: float(pos_dict[pos][word]) / sum(pos_dict[pos].values())\r\n for word in pos_dict[pos].keys()\r\n }\r\n\r\n trans_count = {}\r\n transition_prob = {}\r\n for pos in pos_list:\r\n trans_count[pos] = {}\r\n transition_prob[pos] = {}\r\n pair_list = [\r\n tuple([line[1][i], line[1][i + 1]])\r\n for line in data\r\n for i in range(len(line[1]) - 1)\r\n ]\r\n\r\n unique_list = list(set(pair_list))\r\n\r\n for element in unique_list:\r\n trans_count[element[0]].update({element[1]: pair_list.count(element)})\r\n\r\n for pos in pos_list:\r\n transition_prob[pos] = {pos: (1 / float(10 ** 8)) for pos in pos_list}\r\n for key, value in trans_count[pos].items():\r\n transition_prob[pos].update(\r\n {key: (value / float(sum(trans_count[pos].values())))}\r\n )\r\n initial_list = [line[1][0] for line in data]\r\n initial_count = Counter(initial_list)\r\n initial_prob = {\r\n pos: float(initial_count[pos]) / sum(initial_count.values())\r\n for pos in initial_count.keys()\r\n }\r\n self.position_list = pos_list\r\n self.emission_probability = emission_prob\r\n self.transition_probability = transition_prob\r\n self.initial_probability = initial_prob\r\n self.posterior_probability = posterior_prob", "def tts(model, text):\n\tif USE_CUDA:\n\t\tmodel = model.cuda()\n\t\n\t# NOTE: dropout in the decoder should be activated for generalization!\n\t# model.decoder.eval()\n\tmodel.encoder.eval()\n\tmodel.postnet.eval()\n\n\tsequence = np.array(text_to_sequence(text))\n\tsequence = Variable(torch.from_numpy(sequence)).unsqueeze(0)\n\tif USE_CUDA:\n\t\tsequence = sequence.cuda()\n\n\t# Greedy decoding\n\tmel_outputs, linear_outputs, gate_outputs, alignments = model(sequence)\n\n\tlinear_output = linear_outputs[0].cpu().data.numpy()\n\tspectrogram = audio._denormalize(linear_output)\n\talignment = alignments[0].cpu().data.numpy()\n\n\t# Predicted audio signal\n\twaveform = audio.inv_spectrogram(linear_output.T)\n\n\treturn waveform, alignment, spectrogram", "def naive(self, text):\n\t\t#print(text)\n\t\ttokenizedText = []\n\t\tfor k in text: #look at each entity in one sentence\n\t\t\t\n\t\t\ta = \"\"#stores the current word \n\t\t\trun = []; #appends all words in a particular sentence\n\t\t\tfor i in range(len(k)):\n\t\t\t\t\n\t\t\t\tif(k[i] == ' ' or k[i] == '\t'): #tokenization at space or tab\n\t\t\t\t\t\n\t\t\t\t\tif(a!=\"\"):\n\t\t\t\t\t\tif(a[-1] == ',' or a[-1] == '-' or a[-1] == \"\\'\" or a[-1] == \";\" or a[-1] == \":\" or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\\"\") : #but remove mentioned punctuations from the end of the word, if present\n\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):#remove starting quotes\n\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telif(i == len(k)-1): #remove the last punctuation mark, if present\n\t\t\t\t\t\n\t\t\t\t\ta = a+k[i];\n\t\t\t\t\t\n\t\t\t\t\tif(a[-1] == '.' 
or a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\'\" ):\n\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\n\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tif((k[i] == ',' or k[i] == ':' or k[i] == ';') and k[i+1]!= ' ' ): # for other punctuation marks followed by a space\n\t\t\t\t\t\t#print(k[i-1])\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\tif(a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" ):\n\t\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\ta = a+k[i];\n\n\t\t\ttokenizedText.append(run)\t\t\n\n\t\t\n\t\t\t\n\n\n\n\n\t\t#Fill in code here\n\n\t\treturn tokenizedText", "def make_text(chains):\n\n text = []\n nchars = 0\n\n # Starting ngram (as tuple), first word in tuple must be uppercase\n start = choice(get_uppercase(chains))\n\n # Add starting ngram to text list\n text.extend(start)\n\n # Add length of words in first bigram and two spaces to nchars\n nchars += len(start[0]) + len(start[1]) + 2\n\n while nchars < 119:\n # Choose next word randomly from list\n new_word = choice(chains[start])\n\n # add length of new word to nchars\n # add one for space between words\n nchars += len(new_word) + 1\n\n if nchars > 120:\n break\n else:\n # Add new word to text list\n text.append(new_word)\n\n # Generate tuple for next ngram\n new_key = start[1:] + (new_word,)\n\n # Break out of loop if bigram doesn't exist\n if new_key in chains:\n start = new_key\n else:\n break\n\n text.append(\"#hackbrightgracejan17\")\n\n # Find last sentence punctuation in text\n text_string = ' '.join(text)\n\n # period = text_string.rfind('.')\n # exclamation = text_string.rfind('!')\n # question = text_string.rfind('?')\n\n # largest = max(period, exclamation, question)\n\n # # Remove everything after the last punctuation, if there is anything\n # if len(text_string) == largest+1:\n # return text_string\n # else:\n # return text_string[:largest+1]\n\n return text_string", "def add_negative_samples(skipgram_data, unigrams_table, neg_examples_size=5):\n sg_neg_examples = []\n total_data = len(skipgram_data)\n for i, sg in tqdm(enumerate(skipgram_data), desc=\"Processing neg. samples ({} in total)\".format((total_data-1)),\n unit= \" neg. 
samples\"):\n for gram in sg:\n gram += negative_sampling(word_input=gram[0], target=gram[1],\n unigrams_table=unigrams_table, neg_examples_size=neg_examples_size)\n sg_neg_examples.append(gram)\n return sg_neg_examples", "def word_dropout(inp, target, p=0.0, training=True, reserved_codes=()):\n if not training or p == 0:\n return inp\n\n mask = word_dropout_mask(inp.data, p, reserved_codes=reserved_codes)\n\n return inp.masked_fill(Variable(mask), target)", "def __init__(\n self, \n dict_file = \"dictionary.txt\",\n model_file = \"count_1w.txt\", \n bigram_file = \"count_2w.txt\"\n ):\n self.dict_file = dict_file\n self.model_file = model_file\n self.bigram_file = bigram_file\n self.bigrams = defaultdict(lambda: defaultdict(lambda: 0))\n self.model = {} \n self.pop_model()\n self.pop_bigrams()", "def model(nr, nw, model_num):\r\n\r\n\treader = csv.reader(open(sys.argv[nr], \"r\"), delimiter='\\t') \r\n\tex = np.array(list(reader))\r\n\r\n\tlabel = np.zeros(len(ex))\r\n\r\n\twriter = open(sys.argv[nw], \"w\")\r\n\r\n\tif(model_num==1):\r\n\r\n\t\t# model - 1 = indicating which word occurs atleast once in the review from the dictionary\r\n\r\n\t\tfor i in range(len(ex)):\r\n\r\n\t\t\t# noting down the label assigned to each movie from review\r\n\t\t\tlabel[i] = ex[i][0]\r\n\r\n\t\t\twriter.write(str(int(label[i])))\r\n\t\t\t\r\n\t\t\tA = ex[i][1].split()\r\n\t\t\t\r\n\t\t\t# remove repetitive\r\n\t\t\tx = {} #empty dictionary to keep a track of repetitive words\r\n\r\n\t\t\tfor a in A:\t\t\t\t\r\n\t\t\t\tif(a in dict and dict[a] not in x):\t\r\n\t\t\t\t\t# adding a label 1 to words which are occuring in the review\r\n\t\t\t\t \tx[dict[a]]=1\r\n\t\t\t\t \twriter.write(\"\\t\")\r\n\t\t\t\t \twriter.write(dict[a]+\":1\")\r\n\t\t\t\t \t\r\n\t\t\twriter.write(\"\\n\")\r\n\r\n\telif(model_num==2):\r\n\r\n\t\t\"\"\"model - 2 = keeping a count of all the words in review and removing the words which are \r\n\t\toccuring more than 4 times as they may be just punctuation\"\"\"\r\n\r\n\t\tfor i in range(len(ex)):\r\n\t\t\t# noting down the label assigned to each movie from review\r\n\t\t\tlabel[i] = ex[i][0]\r\n\r\n\t\t\twriter.write(str(int(label[i])))\r\n\t\t\t\r\n\t\t\tA = ex[i][1].split()\r\n\t\t\t\r\n\t\t\tx = {} #dictionary to keep track of words occuring in review\r\n\r\n\t\t\tfor a in A:\r\n\t\t\t\t\r\n\t\t\t\tif(a in dict):\r\n\t\t\t\t\t# if word already there, add a count or else add it to the dictionary\r\n\r\n\t\t\t \t\tif(dict[a] in x):\r\n\t\t\t \t\t\tx[dict[a]]=x[dict[a]]+1\r\n\r\n\t\t\t \t\telif(dict[a] not in x):\r\n\t\t\t \t\t\tx[dict[a]]=1\r\n\t\t\t\r\n\t\t\t# to remove words which are occuring more than 4 times\r\n\t\t\ty = {}\r\n\t\t\tfor a in A:\r\n\t\t\t\tif(a in dict and x[dict[a]] < 4 and dict[a] not in y):\t\r\n\t\t\t\t\ty[dict[a]]=1\r\n\t\t\t\t\twriter.write(\"\\t\")\r\n\t\t\t\t\twriter.write(dict[a]+\":1\")\r\n\t\t\t\t\t\r\n\t\t\twriter.write(\"\\n\")", "def generate(size, data_dim=5, n_phrase_labels=4, n_words=3,\n n_phrase_words=3, n_phrases=5, label_noise=0.,\n min_sent_len=5, max_sent_len=5, tag_end=True):\n assert n_words < 256\n assert max_sent_len >= n_phrase_words\n global dictionary, phrases\n\n # generate dictionary\n dictionary = uniform(-1.0, 1.0, size=(n_words, data_dim))\n\n # generate n_phrases unique word sequences of length n_phrase_words\n print \"Generating %d phrases\" % n_phrases\n phrases = []\n phrase_labels = []\n while len(phrases) != n_phrases:\n phrases = np.unique(np.array([\"\".join(map(chr, randint(n_words, size=n_phrase_words)))\n for i in 
xrange(n_phrases)], dtype=np.object))\n assert np.unique(map(len, phrases)) == n_phrase_words\n phrase_labels = 1+randint(n_phrase_labels-1, size=n_phrases)\n\n # generate 'sentences'\n print \"Generating %d sentences\" % sum(size)\n Xind = []\n Y = []\n for i in xrange(sum(size)):\n while True:\n sent_len = randint(min_sent_len, max_sent_len+1)\n sent = \"\".join(map(chr, randint(n_words, size=sent_len)))\n if contains_any_phrase(sent, phrases):\n print \".\",\n break\n Y.append(np.zeros(sent_len,dtype=np.int))\n Xind.append(sent)\n\n # generate labels for dataset\n print \"Generating labels for the sentences...\"\n for phrase, plabel in zip(phrases, phrase_labels):\n for idx, sent in enumerate(Xind):\n start = 0\n while True:\n sidx = sent.find(phrase, start)\n if sidx < 0:\n break\n if tag_end:\n Y[idx][sidx+len(phrase)-1] = plabel\n else:\n Y[idx][sidx] = plabel\n start += 1\n\n print \"Trafo...\"\n # transform dataset to code\n if data_dim > 1:\n X = [[dictionary[ord(c)] for c in sent] for sent in Xind]\n else:\n X = [[ord(c) for c in sent] for sent in Xind]\n\n Xtrain, Xtest = X[:size[0]], X[size[0]:]\n Ytrain, Ytest = Y[:size[0]], Y[size[0]:]\n\n # training label noise\n for sent in Ytrain:\n mask = uniform(size=sent.size) < label_noise\n sent[mask] = randint(n_phrase_labels, size=mask.sum())\n print \"Done.\"\n\n return Xtrain, Xtest, Ytrain, Ytest", "def build_model(self, text):\n text = '< ' * (self.n - 1) + text.replace(' . ', ' .%s ' % (' <' * (self.n - 1))) + ' >'\n tokens = self.split(text)\n self.corpus_len = len(tokens)\n self.n_grams_by_len = [{} for _ in range(self.n)]\n for i in range(len(tokens)): # for index in tokens\n for n in range(self.n): # for n-gram size from 1 to n:\n if i >= n: # if the index has advanced enough for this n\n n_gram = self.join(tokens[i - n: i + 1])\n n_grams = self.n_grams_by_len[n] # get dict for respective n\n n_grams[n_gram] = n_grams.get(n_gram, 0) + 1 # get dict for respective n\n return self.get_model()", "def predictSpam(self, text):\n cv, mnb = self.loadpklfile(self.config['transform_path']\n ['transform_model_path'], \n self.config['nlp_path']['model_path'])\n vect = cv.transform(text).toarray()\n my_pred = mnb.predict(vect)\n return my_pred", "def parallel_sequential_generation(seed_text, batch_size=10, mask_len=14, top_k=0, temperature=None, max_iter=300, burnin=200,\n cuda=False, print_every=10, verbose=True):\n seed_len = len(seed_text)\n batch = get_init_text(seed_text, mask_len, batch_size)\n \n for ii in range(max_iter):\n kk = np.random.randint(0, mask_len) if np.random.randint(0,2) == 0 else np.random.randint(seed_len + mask_len, seed_len + 2* mask_len)\n for jj in range(batch_size):\n batch[jj][kk] = mask_id\n inp = torch.tensor(batch).cuda() if cuda else torch.tensor(batch)\n out = model(inp)\n topk = top_k if (ii >= burnin) else 0\n idxs = generate_step(out, gen_idx=kk, top_k=topk, temperature=temperature, sample=(ii < burnin))\n idxs = idxs if hasattr(idxs, \"__getitem__\") else [idxs]\n for jj in range(batch_size):\n batch[jj][kk] = idxs[jj]\n \n if verbose and np.mod(ii+1, print_every) == 0:\n for_print = tokenizer.convert_ids_to_tokens(batch[0])\n for_print = for_print[:kk+1] + ['(*)'] + for_print[kk+1:]\n print(\"iter\", ii+1, \" \".join(for_print))\n \n return untokenize_batch(batch)", "def train():\n num_spam=0 \n num_ham=0\n spam_words=()\n ham_words=()\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n #print(dataArray)\n dataArrayTrain=dataArray[0:21300] #opens training set from 
folder 000-070\n \n for eachLine in dataArrayTrain:\n kind,file = eachLine.split(' ')\n file=file.strip('../') \n #print(kind)\n #print(file)\n \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n print(filepath)\n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n #print(email)\n email_words = processText(contentEmail(email))\n #print(email_words)\n email_words = tuple(list(set(email_words))) #converted it into a set to avoid repetition of words in every email\n #print(email_words)\n if (kind == \"spam\"):\n num_spam+=1 #counts how many spam emails\n spam_words= spam_words + tuple(email_words) #adds every word to a spam tuple\n\n elif (kind==\"ham\"):\n num_ham+=1 #counts how many ham emails\n ham_words= ham_words + tuple(email_words) #adds every word to a ham tuple\n\n spam_words= tuple(spam_words)\n ham_words= tuple(ham_words)\n\n \n count_spam = collections.Counter(spam_words) #counts how many times a words appears in all spam emails\n count_ham = collections.Counter(ham_words) #counts how many times a words appears in all ham emails\n total_count = (count_spam + count_ham).most_common(5000) #adds the total occurences of the words and gets top 5000\n #print(total_count)\n #print(num_ham, num_spam)\n\n top_words = []\n for everyword in total_count:\n top_words.append(everyword[0])\n for everyword in list(count_spam):\n if everyword not in top_words:\n del count_spam[everyword] #deletes words in spam emails not included in top 5000\n for everyword in list(count_ham):\n if everyword not in top_words:\n del count_ham[everyword] #deletes words in ham emails not included in top 5000\n #print(words, count_ham, count_spam)\n\n file_encoder = open(\"top_word_count.txt\", \"w+\", encoding = 'utf-8', errors = 'ignore')\n file_encoder.write(\"HERE ARE YOUR TOP 5000 WORDS: \"+\"\\n\"+str(total_count)+\"\\n\"+\"\\n\"+\"SPAM WORDS: \"+\"\\n\"+str(count_spam)+\"\\n\"+\"\\n\"+\"HAM WORDS: \"+\"\\n\"+str(count_ham))\n file_encoder.close()\n print(\"Counting and getting top 5000 words successful!\")\n probabilityGet(num_spam, num_ham, count_spam, count_ham)" ]
[ "0.7335781", "0.6271761", "0.6103409", "0.5918872", "0.5912268", "0.59084326", "0.5804064", "0.57811296", "0.56691426", "0.5648485", "0.5640333", "0.5547922", "0.5508764", "0.54452556", "0.5404064", "0.5391015", "0.53892845", "0.5379182", "0.53635854", "0.535121", "0.5332329", "0.5331775", "0.53032625", "0.5298148", "0.52437174", "0.5218011", "0.51996666", "0.51996666", "0.5194956", "0.5178328", "0.51723516", "0.51703745", "0.5170245", "0.5169857", "0.5169172", "0.5152449", "0.5137616", "0.5126353", "0.51245385", "0.51231265", "0.5120134", "0.5088079", "0.5079219", "0.50750333", "0.5071189", "0.5060351", "0.50599915", "0.5056527", "0.5036733", "0.50291854", "0.50115937", "0.5007772", "0.5006519", "0.4979169", "0.4974128", "0.49642107", "0.49610165", "0.49513495", "0.49510667", "0.49456203", "0.4939197", "0.49308786", "0.49290615", "0.4927556", "0.49210382", "0.49180925", "0.49141636", "0.49010488", "0.49010488", "0.49010488", "0.48929623", "0.4892489", "0.48861933", "0.48752823", "0.48726776", "0.48724526", "0.4872016", "0.48682404", "0.48673627", "0.48591977", "0.4857835", "0.4856036", "0.48553962", "0.48513654", "0.4850381", "0.4844309", "0.4841238", "0.4841238", "0.4817944", "0.48114565", "0.4807748", "0.4802787", "0.4797895", "0.4791372", "0.4788949", "0.4776685", "0.4770769", "0.47587904", "0.47575185", "0.47547582", "0.47520182" ]
0.0
-1
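Several negatives in the row that ends above (for example process_sentence and the test_word2vec harnesses) build (center, context) skip-gram pairs with the same windowed loop; a minimal standalone sketch of that pattern, with a made-up four-token sentence (function name and data are illustrative only, not part of the dataset):

    # Skip-gram pairing: pair each center word with every word inside a
    # symmetric window around it (the same loop process_sentence uses).
    def skipgram_pairs(sentence, window=2):
        pairs = []
        for pos, word in enumerate(sentence):
            start = max(0, pos - window)
            for pos2, context in enumerate(sentence[start:pos + window + 1], start):
                if pos2 != pos:
                    pairs.append((word, context))
        return pairs

    print(skipgram_pairs(["a", "b", "c", "d"], window=1))
    # [('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b'), ('c', 'd'), ('d', 'c')]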
Function to join path and filename.
def pjoin(self, in_dir, file_name): return os.path.join(in_dir, file_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def join(path: str, fileName: str) -> str:\n if os.name == 'nt':\n return ntpath.join(path, fileName)\n else:\n return posixpath.join(path, fileName)", "def filename_path_join(path, filename):\n\n # Raise an error if filename is None\n if filename is None:\n raise ValueError(\"Cannot create a path to a filename set as None\")\n\n # Return an absolute path unchanged\n elif os.path.isabs(filename):\n return filename\n\n # If path is set, join filename to it and return that\n elif path is not None:\n return InferelatorDataLoader.make_path_safe(os.path.join(path, filename))\n\n # If path is not set, convert the filename to absolute and return it\n else:\n return InferelatorDataLoader.make_path_safe(filename)", "def join(self, fname):\n return os.path.abspath(os.path.expanduser(os.path.join(self.path, fname)))", "def join_path(*args):\n return os.path.join(*args)", "def joinPath(path, *args):", "def _path_join(self, path):\n return os.path.join(self._path, path)", "def join(path, *paths: str) -> str:\n pass", "def join_path(base, name, ext):\n return os.path.join(base, name + ext)", "def join(self, path, *paths):", "def join_path(tuple_path):\n return os.path.join(tuple_path[1], tuple_path[1] + tuple_path[2])", "def join_infile_path(*paths):\n # Join path components\n path = '/'.join(paths)\n # Correct double slashes, if any is present\n path = path.replace('//', '/')\n\n return path", "def join_path(self, path_parts):\n return os.path.sep.join(path_parts)", "def join(path, *paths):\n\n for p in paths:\n if p.startswith(\"/\"):\n path = p\n elif p != \"\":\n path += (\"\" if path == \"\" or path.endswith(\"/\") else \"/\") + p\n return path", "def zenpathjoin(self, path):\n return zenpathjoin(path)", "def aix_path_join(path_one, path_two):\n if path_one.endswith('/'):\n path_one = path_one.rstrip('/')\n\n if path_two.startswith('/'):\n path_two = path_two.lstrip('/')\n\n final_path = path_one + '/' + path_two\n return final_path", "def join(self, *args):\n return os.path.join(self.directory, *args)", "def join_path(base, *args):\n\tfilepath = base\n\tfor arg in args:\n\t\tfilepath = filepath + cfg.SEP_COMM + arg\n\tfilepath = filepath.replace( '//', cfg.SEP_COMM)\n\treturn filepath", "def joinwith(self, path):\n\n return path.joinpath(self._value)", "def join(cls, *args):\n return AbsolutePath(os.path.join(*(str(piece) for piece in args)))", "def join(*paths):\r\n path = \"\"\r\n for component in paths:\r\n path += (\"/\" if path and not path.endswith(\"/\") else \"\") + component.replace(\r\n \"\\\\\", \"/\"\r\n )\r\n return path", "def path_join(first: str, second: str) -> str:\n first = first.rstrip('/\\\\')\n second = second.lstrip('/\\\\')\n if not first: return second\n if not second: return first\n return first + '/' + second", "def join_path(list):\n return functools.reduce(os.path.join, list)", "def __make_path(self, filename):\n return self.__path() + os.sep + filename", "def str_join(paths: []):\n return \"/\".join(paths)", "def join(self, *parts):\n if parts:\n parts = list(parts)\n if len(parts) > 1:\n for i, p in enumerate(parts[:-1]):\n parts[i] = p.strip('/')\n parts[-1] = parts[-1].lstrip('/')\n return '/'.join(parts)", "def file_path(file_name, path):\n return path.rstrip('\\/') + \"/{0}\".format(file_name) if path else os.getcwd() + \"/{0}\".format(file_name)", "def join_paths(path_1, path_2):\r\n a = lib_path.join(path_1, path_2)\r\n return a", "def build_file_path(dir_name, file_name, ext):\n return os.path.join(dir_name, os.path.extsep.join((file_name, ext)))", "def 
savePathJoin(self, path):\n return os.path.join(self.user[\"Save\"], path)", "def generate_path(root_path,filename):\n file_path = os.path.join(\n root_path,\n filename[0:2],# First two chars of filename\n filename[2:4],# Second two chars of filename\n filename[4:6],# Third two chars of filename\n filename\n )\n return file_path", "def combine_path(base_path, relative_ref):\n if (base_path != \"\"):\n os.chdir(base_path)\n # Handle if .tex is supplied directly with file name or not\n if relative_ref.endswith('.tex'):\n return os.path.join(base_path, relative_ref)\n else:\n return os.path.abspath(relative_ref) + '.tex'", "def get_file_path(filename, path='Data/'):\n path= os.path.abspath(os.path.dirname(path))\n return os.path.join(path, filename)", "def make_path(self, filename):\n return os.path.join(self.root_path, filename)", "def append_to_path(path, name):\n if path[-1] == '/' or path[-1] == ':':\n return path + name\n else:\n return str(path) + str('/') + str(name)", "def getPath(self, date, sep = '/'):\n\n return sep.join( [self.getDirName(date), self.getFileName(date)] )", "def append_path(path1, path2):\n\n # Get the first absolute path\n abs_path1 = abspath(path1)\n\n # Return the joined paths\n return os.path.join(abs_path1, path2).replace(\"\\\\\", \"/\")", "def normalized_join(path1: str, *pathsN) -> str:\n return normalized_path(os.path.join(path1, *pathsN))", "def append_to_filename(filepath: str, name_suffix: str, new_ext: Optional[str] = None) -> str:\n ext = new_ext or filepath_ext(filepath)\n name = filepath_name_only(filepath)\n return str(pathlib.Path(filepath).with_name(name+name_suffix).with_suffix(ext))", "def combine_folder_and_file_name(folder_name, file_name):\n combined_name = os.path.normpath(\n f'{folder_name}{\"/\" if folder_name else \"\"}{file_name}')\n combined_name = os.path.normpath(combined_name)\n\n return combined_name", "def pathToFileName(self, path):\n\t\t# Find the path, and strip the leading slash.\n\t\tpath =urlparse.urlparse(self.path)[2].lstrip(\"/\")\n\t\t# Process url escape codes, and normalize the path.\n\t\tpath = os.path.normpath(urllib2.unquote(path))\n\t\t# normpath strips the last slash\n\t\tif os.path.isdir(path):\n\t\t\treturn path + '/'\n\t\telse:\n\t\t\treturn path", "def getpath(self, path):\n return self._join(path)", "def lpath(file0, file1):\n return os.path.abspath(os.path.join(os.path.dirname(file0), file1))", "def relativize(path: str):\n return join('.', path)", "def testJoin(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n data={\r\n # 1\r\n 'relativePath':\r\n ['/dir1/',P('dir2/fileBase.ext'),'/dir1/dir2/fileBase.ext'],\r\n\r\n # 2\r\n 'absolutePath':\r\n ['/dir1/',P('/dir2/fileBase.ext'),'/dir2/fileBase.ext'],\r\n\r\n # 3\r\n 'notSeparatorTerminatedPath':\r\n ['dir1',P('dir2/fileBase.ext'),'dir1/dir2/fileBase.ext'],\r\n\r\n # 4\r\n 'emptyPath':\r\n ['dir1',P(''),'dir1/'],\r\n\r\n # 5\r\n 'nonNativePath':\r\n ['dir1',ufsi.HttpPath('http://www.google.com.au/'),\r\n 'http://www.google.com.au/']\r\n }\r\n\r\n for k in data.iterkeys():\r\n p1=P(data[k][0])\r\n p2=data[k][1]\r\n r1=str(p1.join(p2))\r\n r2=data[k][2]\r\n self.assertEquals(r1,r2,\r\n '%s: join result was %r but should have been %r'\r\n %(k,r1,r2))", "def path(self, args):\n dir_path = self.dir_path_(*args)\n return os.path.join(dir_path, self.file_name)", "def composePath(self,splitedPath):\n # 027 It is used anywhere?? Nope!! 
Remove!\n\n self.debug.printHeader()\n return os.sep.join(splitedPath)", "def join(self, *args: os.PathLike[str], abs: bool = False) -> LocalPath:\n sep = self.sep\n strargs = [os.fspath(arg) for arg in args]\n strpath = self.strpath\n if abs:\n newargs: list[str] = []\n for arg in reversed(strargs):\n if isabs(arg):\n strpath = arg\n strargs = newargs\n break\n newargs.insert(0, arg)\n # special case for when we have e.g. strpath == \"/\"\n actual_sep = \"\" if strpath.endswith(sep) else sep\n for arg in strargs:\n arg = arg.strip(sep)\n if iswin32:\n # allow unix style paths even on windows.\n arg = arg.strip(\"/\")\n arg = arg.replace(\"/\", sep)\n strpath = strpath + actual_sep + arg\n actual_sep = sep\n obj = object.__new__(self.__class__)\n obj.strpath = normpath(strpath)\n return obj", "def join(self, path, *paths):\n if not self.is_managed_path(path):\n return os.path.join(path, *paths)\n client, _ = self._get_storage(path)\n prefix, rel_path = self.parse_managed_path(path)\n return '%s:%s' % (prefix, client.join(rel_path, *paths)) # Only join the actual path.", "def path(filename: str) -> str:\n path = os.path.dirname(sys.argv[0])\n if not path:\n path = '.'\n return path + '/' + filename", "def format_path(file: str) -> str:\n return os.path.abspath([file.replace('/', os.path.sep)][0])", "def make_file_name(name):\n expanded_path = os.path.expandvars(make_fp_rel(name))\n return expanded_path", "def convertPath (source, target, filename):\n\tfrom os.path import join as joinPath\n\tfrom os import sep\n\n\t# Get the source path informations\n\tdirSrc = filenameSplit (source)[1]\n\n\t# Get the target path informations\n\tdiskDst, dirDst, nameDst, extDst = filenameSplit (target)\n\n\t# Get the current file informations\n\tdummy, dirFil, nameFil, extFil = filenameSplit (filename)\n\n\t# Build the target path\n\tdir_ = normalizePath(dirDst + sep + dirFil[len(dirSrc):len(dirSrc) + len(dirFil)-len(dirSrc)])\n\n\t# Add the target filename\n\tname = convertFilename (nameDst,nameFil)\n\n\t# Add the target extension\n\text = convertFilename (extDst,extFil)\n\n\treturn diskDst + joinPath(dir_, name + ext)", "def path_filename_representation(path):\n # Strip leading / and replace / with .\n return re.sub(r\"^/(.*)$\", r\"\\1\", path).replace(\"/\", \".\")", "def buildPath(dir, file_name, ext, description = None):\n\tpath = os.path.join(dir, file_name)\n\tbase, _ = os.path.splitext(path)\n\tif not description == None:\n\t\tbase = base + '.' 
+ description\n\treturn base + ext", "def opath ( dir_name, file_name = None ):\n if file_name:\n return os.path.join(output_path, dir_name, file_name)\n return os.path.join(output_path, dir_name)", "def _urljoin(self, *args):\r\n\t\treturn \"/\".join(map(lambda x: str(x).rstrip('/'), args))", "def _join_path(\n year: int, day: int, session: str, file_type: Optional[str] = None\n) -> str:\n cache_location = user_cache_dir(appname=\"advent-of-code\")\n cache_file = os.path.join(cache_location, str(session), str(year), str(day))\n if file_type == \"input_file\":\n cache_file = os.path.join(cache_file, \"input.txt\")\n if file_type == \"submission_file\":\n cache_file = os.path.join(cache_file, \"submission.txt\")\n if file_type == \"last_time_file\":\n cache_file = os.path.join(cache_file, \"time.txt\")\n return cache_file", "def rel(*x):\n return join(abspath(dirname(__file__)), *x)", "def get_full_path(file_extension=True) -> str:\n return get_directory() + \"/\" + get_filename(file_extension=file_extension)", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def full_path(filename):\n\timport os.path\n\tfolder = os.path.dirname(os.path.realpath(__file__))\n\treturn os.path.join(folder, filename)", "def join_path(self, template, parent):\n if (template.startswith('./')):\n return os.path.join(os.path.dirname(parent), template)\n return template", "def urljoin(base, *path):\n return reduce(_join, path, base)", "def get_file_path(filename):\n if 'http' in filename:\n parsed_uri = urlparse(filename)\n f = '/' + parsed_uri.path[1:]\n f = '/'.join(f.split('/')[3:]) # split the xxx dir, remove the leading /\n else:\n filename = ('/' + filename) if filename[0] != '/' else filename # make sure starts with /\n # split local img path from path\n f = filename.replace(settings.FILE_PATH, '/')\n f = f.replace(settings.IMAGE_PATH, '/')\n f = f.replace(settings.DERIVED_PATH, '/')\n f = '/'.join(f.split('/')[2:]) # split the xxx dir, remove the leading /\n\n return f", "def get_aug_path(file_path: str) -> str:\n return \"/files%s\" % file_path", "def testJoinPath(self):\n test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_helper = dfvfs_helpers.DFVFSFileSystemHelper(None)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n test_helper.OpenFileSystem(path_spec)\n\n path_segments = os.path.split(test_file_path)\n\n path = test_helper.JoinPath(path_segments)\n self.assertEqual(path, test_file_path)", "def fpath(self):\n return os.path.join(self.path, self.name)", "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def urljoin(*args):\n\n return \"/\".join(map(lambda x: str(x).rstrip('/'), args))", "def _url_join(self, *parts):\n return \"/\".join(map(lambda fragment: fragment.rstrip('/'), parts))", "def path_to(self, course_id, filename):\r\n return os.path.join(self.root_path, urllib.quote(course_id.to_deprecated_string(), safe=''), filename)", "def get_filename(self, path, prefix, suffix, date, period):\n return os.path.join(path,\n '%s%s%s' % (\n prefix,\n self.get_filename_date(date,\n params=dict(period=period)),\n suffix))", "def create_file_path(fname, direc=\"data/result/\"):\n path = os.path.join(TOP_LEVEL, direc, fname)\n return path", "def file_path(instance, filename):\n hashcode = hash(filename)\n mask = 255 # bitmask\n # use the first and second bytes of the hash code represented as\n # zero-padded hex numbers as directory 
names\n # provides 256 * 256 = 65536 of possible directory combinations\n dir1 = \"{:0>2x}\".format(hashcode & mask)\n dir2 = \"{:0>2x}\".format((hashcode >> 8) & mask)\n # Galaxy doesn't process names with parentheses in them\n filename = re.sub('[()]', '_', filename)\n return os.path.join(dir1, dir2, filename)", "def _construct_path(self, sep, with_drive_letter=True):\n result = sep.join(self._components)\n if self._absolute:\n result = \"{}{}\".format(sep, result)\n if with_drive_letter and self._drive_letter:\n result = \"{}:{}\".format(self._drive_letter, result)\n return result", "def\textractPathFromPathfilename(self,fullPathFilename):\n\t\treturn(fullPathFilename[0:fullPathFilename.rfind('\\\\')+1])", "def rel_filename(filename, relative_to=None):\n if relative_to is None:\n relative_to = os.getcwd()\n if not relative_to.endswith(os.path.sep):\n relative_to += os.path.sep\n filename = os.path.normpath(os.path.abspath(filename))\n if filename.startswith(relative_to):\n return filename[len(relative_to):]\n else:\n return filename", "def getfilename(path):\r\n return path.split('\\\\').pop().split('/').pop().rsplit('.', 1)[0]", "def url_path_join(*fragments):\n fragments = fragments or (\"\",)\n result = fragments[0] # Tolerate an empty list\n for thing in fragments[1:]:\n result = result.rstrip(\"/\") + \"/\" + thing.lstrip(\"/\")\n return result", "def make_path(self, basename):\n return os.path.join(self.output_folder, basename.format(self.sample_name))", "def join_path(self, template: str, parent: str) -> str:\n return template", "def get_file_path(filename: str):\n return TEMP_DIR.joinpath(filename)", "def get_full_filename(dirname, name, ext, tmstamp=False):\n fill = '_' + str_current_time() if tmstamp else ''\n fmt = '/{}{}{}' if ext.startswith('.') else '/{}{}.{}'\n return resolve(dirname) + fmt.format(name, fill, ext)", "def url_path_join(*pieces):\n initial = pieces[0].startswith(\"/\")\n final = pieces[-1].endswith(\"/\")\n stripped = [s.strip(\"/\") for s in pieces]\n result = \"/\".join(s for s in stripped if s)\n if initial:\n result = \"/\" + result\n if final:\n result = result + \"/\"\n if result == \"//\":\n result = \"/\"\n return result", "def _path_to_string(path):\n return '.'.join(path)", "def build_path(*path_elements):\n return path.abspath(path.expanduser(path.join(*path_elements)))", "def destination_name(path: list[str], delimiter: str = \"__\") -> str:\n return f\"{delimiter.join(path)}\"", "def make_fs_path(parts):\n return '/'.join(parts)", "def attach_path(path):\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), path)", "def getPath(filename):\n\n if os.path.isabs(filename):\n pathfile = filename\n else:\n filename = filename.lstrip('/\\.')\n filename = filename.replace('/', '\\\\')\n pathfile = os.path.join(os.getcwd(), filename)\n \n return pathfile", "def join_url(*args): # type: (*str) -> str\n parts = [part[:-1] if part and part[-1] == '/' else part for part in args]\n parts.append('')\n return '/'.join(parts)", "def completePath(path):\n return os.getcwd() + convertString(path)", "def path_to_file(fname, dirs):\n for each in dirs:\n path = '/'.join([each, fname])\n if os.path.exists(path):\n return path\n return None", "def qualify_full_filepath(filename, path=None):\n filepath = os.path.join(path or \"\", filename)\n if not os.path.isfile(filepath):\n raise OSError(f\"No available file found at: {filename}.\")\n return filepath", "def format_path(path):\n return path if path.endswith('/') else path + '/'", "def 
generate_new_filename(path, key, encrypt):\n # init fermet\n f = Fernet(key)\n # split path and filename\n filename = path\n fullpath = \"\"\n if \"/\" in path:\n fullpaths = path.split(\"/\")\n filename = fullpaths[-1]\n fullpath = \"\"\n for x in fullpaths:\n if x != filename:\n fullpath += x + \"/\"\n\n if encrypt:\n # encode filename\n filename = f.encrypt(filename.encode(\"utf-8\")).decode(\"utf-8\")\n else:\n # decode filename\n filename = f.decrypt(filename.encode(\"utf-8\")).decode(\"utf-8\")\n\n return fullpath + filename", "def _formatPath(directoryPath, filePath):\n return directoryPath + \"\\\\\" + filePath", "def build_relative_path(full_path, prefix='/', split_on='/data/'):\n splits = full_path.split(split_on)\n return os.path.join(prefix, split_on, splits[-1])", "def path(self, *path):\n path = list(filter(None, path))\n path = self.remove_prefix(path)\n items = [self.prefix_] + path\n return self.delimiter.join(items)", "def path_join_robust(path, *paths):\n\n try:\n # gh-316: joining unicode and str can be saddening in Python 2.x\n path = str(path)\n paths = [str(another_path) for another_path in paths]\n\n return os.path.join(path, *paths)\n except UnicodeDecodeError as e:\n raise locale.Error(\n \"Unable to construct path. This is likely a LOCALE issue:\\n\\n\" + str(e)\n )" ]
[ "0.8078713", "0.7777003", "0.77231276", "0.7715079", "0.7641831", "0.76057595", "0.7435873", "0.74198025", "0.7322059", "0.71884507", "0.71656954", "0.7080947", "0.7009298", "0.694353", "0.69216216", "0.6880216", "0.6874053", "0.6855651", "0.68301845", "0.6820693", "0.681826", "0.675044", "0.67391", "0.66084456", "0.6579646", "0.6575668", "0.6517309", "0.6450567", "0.6444786", "0.643784", "0.6422696", "0.6407613", "0.63663775", "0.63503844", "0.6310524", "0.6303747", "0.62886745", "0.6287378", "0.62738854", "0.62618834", "0.6251089", "0.6247297", "0.6245866", "0.62457323", "0.6234808", "0.6230717", "0.62213165", "0.62055033", "0.620455", "0.6197647", "0.6164057", "0.6158123", "0.61321306", "0.6130567", "0.61102176", "0.609053", "0.60629207", "0.60580564", "0.6048071", "0.6031638", "0.6021098", "0.6017163", "0.59986174", "0.5993551", "0.5985571", "0.5984959", "0.59842795", "0.59804577", "0.5978118", "0.5962656", "0.5954575", "0.5950873", "0.5942626", "0.5921606", "0.59211326", "0.590885", "0.5897686", "0.5889941", "0.58810586", "0.58731896", "0.5858581", "0.58429897", "0.5840958", "0.58230036", "0.5815921", "0.5806204", "0.579886", "0.57966864", "0.5795251", "0.57905006", "0.5780332", "0.5775289", "0.5774619", "0.57673323", "0.5762692", "0.57443553", "0.57436216", "0.5739353", "0.57372165", "0.57180476" ]
0.7675447
4
Function that loads and sets the necessary variables.
def set_variables(self): root_dir = os.path.dirname(os.path.realpath(__file__)) self.scratch_dir = os.path.join(root_dir, 'scratch') self.input_dir = os.path.join(root_dir, 'input_data') self.web_dir = os.path.join(root_dir, 'webserver') #os.chdir(self.scratch_dir) # Input data BIOSAFE self.legal_weights = pd.read_csv( self.pjoin(self.input_dir, 'legalWeights.csv'), index_col = 0) self.links_law = pd.read_csv( self.pjoin(self.input_dir, 'linksLaw.csv'), index_col = 0) self.links_eco1 = pd.read_csv( self.pjoin(self.input_dir, 'linksEco.csv'), index_col = 0) self.lut = pd.read_excel( self.pjoin(self.input_dir, 'BIOSAFE_20190711.xlsx'), sheet_name = 'lut_RWES').fillna(method='ffill') # this lookup table (lut) has: # ecotope codes of BIOSAFE in the 1st column: oldEcotope # aggregated/translated ectotopes in 2nd column: newEcotope # Ecotopes used in Virtual River self.vr_eco = pd.read_csv( self.pjoin(self.input_dir, 'VR_ecotopes.csv')) # Aggregate BIOSAFE ecotopes into RWES ecotopes self.links_eco2 = bsf.aggregateEcotopes(self.links_eco1, self.lut) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_vars():\n\tda_vinci.base.usepackage(\"pgfkeys\")\n\tda_vinci.base.add_preamble(setup_script)", "def load_variables(cls):\n cls._variablesDict = fileops.get_json_dict(cls.get_variables_filepath())", "def init_vars(self):\n\n load_dotenv()\n self.smart_cube = True if os.environ.get(\"SMART_CUBE\") == \"True\" else False\n self.gen_parsed_to_cubedb = True if os.environ.get(\"GEN_PARSED_TO_CUBEDB\") == \"True\" else False\n self.name_of_solve = os.environ.get(\"NAME_OF_SOLVE\")\n self.time_solve = os.environ.get(\"TIME_SOLVE\")\n self.comms_unparsed_bool = True if os.environ.get(\"COMMS_UNPARSED\") == \"True\" else False\n self.gen_with_move_count = True if os.environ.get(\"GEN_WITH_MOVE_COUNT\") == \"True\" else False\n self.diff_to_solved_state = float(os.environ.get(\"DIFF_BETWEEN_ALGS\"))\n self.parse_to_lp = True if os.environ.get(\"PARSE_TO_LETTER_PAIR\") == \"True\" else False\n self.gen_with_moves = True if os.environ.get(\"GEN_WITH_MOVE_COUNT\") == \"True\" else False\n self.buffer_ed = self.get_buffer_ed(os.environ.get(\"EDGES_BUFFER\"))\n self.buffer_cor = self.get_buffer_cor(os.environ.get(\"CORNER_BUFFER\"))\n self.path_to_lp = os.environ.get(\"PATH_LETTER_PAIR_FILE\")\n self.dict_lp = self.load_letter_pairs_dict()", "def _init_vars_():\n\n global ADMIN_USERNAME\n ADMIN_USERNAME = _fetch_ssm_parameter(\n '/app/MDL/{}/{}/LDAP/User/HerdAdminUsername'\n .format(INSTANCE_NAME, ENVIRONMENT), False)\n\n global ADMIN_PASS\n ADMIN_PASS = _fetch_ssm_parameter(\n '/app/MDL/{}/{}/LDAP/Password/HerdAdminPassword'.format(INSTANCE_NAME,\n ENVIRONMENT),\n True)\n\n global HERD_REST_BASE_PATH\n HERD_REST_BASE_PATH = 'herd-app/rest'\n\n global HERD_BASE_URL\n HERD_BASE_URL = '{}-herd.dev.aws.cloudfjord.com'.format(INSTANCE_NAME)\n\n global HERD_HEADERS\n HERD_HEADERS = {\n 'Accept': 'application/json',\n 'Content-type': 'application/json'\n }\n\n global S3_HOME_BUCKET\n S3_HOME_BUCKET = _fetch_ssm_parameter('/app/MDL/{}/{}/S3/MDL'\n .format(INSTANCE_NAME, ENVIRONMENT),\n False)\n\n global BDSQL_MASTER\n BDSQL_MASTER = _fetch_ssm_parameter('/app/MDL/{}/{}/Bdsql/MasterIp'\n .format(INSTANCE_NAME, ENVIRONMENT),\n False)", "def set_vars():\n return dict()", "def init_vars(self):\n # type: () -> None\n raise NotImplementedError", "def updateVariables(self) -> None:\n ...", "def init_locals(self):\n pass", "def set_parameters(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! 
\"\"\"\r\n cls.TSR, cls.RPM, cls.RAD, cls.BLA, cls.CHR, cls.SEC, cls.NT = \\\r\n np.loadtxt('settings.csv', delimiter=',', skiprows=1, unpack=True)", "def _initialize_variables(self):\n\n self.font = Font()\n self.BibTerm = ''\n\n self.illegalChars = [chr(i) for i in range(1, 0x20)]\n self.illegalChars.extend([chr(0x7F), '\"', '*', '/', ':', '<', '>', \\\n '?', '\\\\', '|'])\n\n #define all StringVar(), BooleanVar(), etc… needed to hold info\n self.current_project = StringVar()\n self.dict_in = StringVar()\n self.terms_in = StringVar()\n self.old_dict = StringVar()\n self.dict_in_changed = IntVar()\n self.terms_in_changed = IntVar()\n self.old_dict_changed = IntVar()\n self.add_cldr_fields = IntVar()\n self.accept_regional_digits = IntVar()\n self.selected_lang = StringVar()\n self.int_var = IntVar()\n self.preferred = StringVar()\n self.PrefChar = StringVar()", "def _init_vars(self):\n if not self._has(\"vars\"):\n if self._has(\"p\"):\n self._.vars = self._.p.variables()\n elif self._has(\"q\"):\n self._.vars = self._.q.variables()\n elif self._has(\"P\"):\n self._.vars = variables(self._.P)\n elif self._has(\"Q\"):\n self._.vars = variables(self._.Q)\n self._.vars_ordered = len(self._.vars) <= 1", "def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')", "def initialize_variables(self):\n self.sess.run(self.init)", "def set_variables(self, variables):\n self.variables = variables", "def load_variables(mod):\n for k in dir(mod):\n if '__' not in k:\n globals()[k] = getattr(mod, k)", "def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()", "def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def setup_params():\n Script.fullname = os.path.splitext(os.path.abspath(__file__))[0]\n Script.basename = os.path.basename(__file__)\n Script.name = os.path.splitext(Script.basename)[0]\n Script.service = modUtils.check_service(Script.name)", "def _init_tkvars(self,PO):\n for name,param in PO.params().items():\n self._create_tkvar(PO,name,param)", "def prepare_vars_and_flows(self):\n\n # clear previously populated vars dictionary\n self.vars.clear()\n\n # calculate vars and flows sequentially\n self.calculate_scaleup_vars()\n self.calculate_vars()\n self.calculate_flows()", "def __init__(self, **variables):\n vars(self).update(variables)", "def setup_vars(self):\n # Add Full time positions\n self.manager_id = self._add_person(\"Manager\", \"ARRAY['Database', 'OS', 'AI']\", 30)\n self.admin_id = self._add_person(\"Admin\", salary=40)\n self.full_instructor_id = self._add_person(\n \"Instructor\", \"ARRAY['Database']\", 20\n )\n\n # Add Part time instructor\n self.part_instructor_id = self._add_part_time_instr(\"ARRAY['OS']\", 10)\n self.part_instructor_id = self._add_part_time_instr(\"ARRAY['AI']\", 10)\n\n # Add courses\n self.course_id1 = self._add_course(\"Database\", 1, \"Database\")\n self.course_id2 = 
self._add_course(\"OS\", 1, \"OS\")\n self.course_id3 = self._add_course(\"AI\", 1, \"AI\")\n\n # Add room\n self.room_id = self._add_room(1, 'Test room', 20)\n self.room_id2 = self._add_room(2, 'Test room 2', 20)\n\n # Add course offerings\n self.course_offering1 = self._add_course_offering('2021-01-21', 10, [('2021-06-21', 9, self.room_id), ('2021-06-21', 11, self.room_id)], '2021-05-31', 20, self.course_id1, self.admin_id)\n self.course_offering2 = self._add_course_offering('2021-01-21', 10, [('2021-06-22', 9, self.room_id), ('2021-06-22', 11, self.room_id)], '2021-05-31', 20, self.course_id2, self.admin_id)\n self.course_offering3 = self._add_course_offering('2021-01-21', 10, [('2021-06-22', 9, self.room_id2), ('2021-06-22', 11, self.room_id2)], '2021-05-31', 20, self.course_id3, self.admin_id)\n\n # Add customers\n self.customer_id1 = self._add_customer('Test1', \"test\", 987654321, 'test@test.com', '1234123412341234', '123', '2025-05-31')\n self.customer_id2 = self._add_customer('Test2', \"test\", 987654321, 'test@test.com', '1234123412341235', '123', '2025-05-31')\n self.customer_id3 = self._add_customer('Test3', \"test\", 987654321, 'test@test.com', '1234123412341236', '123', '2025-05-31')\n\n # Register sessions\n self._register_credit_card('2021-01-21', self.course_id1, 1, self.customer_id1)\n self._register_credit_card('2021-01-21', self.course_id1, 1, self.customer_id2)\n self._register_credit_card('2021-01-21', self.course_id1, 1, self.customer_id3)\n\n # Add course packages\n self.package1 = self._add_course_package(\"Best Package\", 2, '2021-03-01', '2021-08-02', 50)\n self.package2 = self._add_course_package(\"Medium Package\", 2, '2021-03-01', '2021-08-02', 100)\n self.package3 = self._add_course_package(\"Worst Package\", 2, '2021-03-01', '2021-08-02', 150)\n\n # Buy course packages\n self._buy_package(self.customer_id1, self.package1)\n self._buy_package(self.customer_id2, self.package2)\n self._buy_package(self.customer_id3, self.package3)\n\n # Redeem sessions\n self._register_redeems('2021-01-21', self.course_id2, 1, self.customer_id1)\n self._register_redeems('2021-01-21', self.course_id2, 1, self.customer_id2)\n self._register_redeems('2021-01-21', self.course_id2, 1, self.customer_id3)\n\n # Cancel registrations\n self._cancel_registration(self.customer_id1, self.course_id1)\n self._cancel_registration(self.customer_id2, self.course_id2)", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def init(self, **kwargs):\n self._d = {}\n self._th = None\n self._run = True\n self.load()", "def __init__(self, variables):\n self._variables = variables", "def update_runtime_variables(self) -> None:\n\n self.update_defines()\n self.update_includes()\n self.update_modules()", "def _load_parameter(self):", "def read_variables(self, dataset):\n if 'variables' in self.configs:\n for variable_name, variable_dict in self.configs['variables'].items():\n if variable_name not in dataset.variables:\n temp_var = dataset.createVariable(variable_name, self.configs['variables'][variable_name]['data_type'])\n temp_var[:] = self.configs['variables'][variable_name]['value']\n \n for key, value in variable_dict.items():\n if (key != 'data_type') and (key != 'value'):\n setattr(temp_var, key, value)", "def _init_env_variables(self):\n #raw_input(\"TakeOFF PRESS\")\n # We TakeOff before sending any movement commands\n self.takeoff()\n\n # For Info Purposes\n self.cumulated_reward = 0.0\n # We get the initial pose to mesure the distance from the desired point.\n gt_pose 
= self.get_gt_pose()\n self.previous_distance_from_des_point = self.get_distance_from_desired_point(\n gt_pose.position)", "def set_locals(self):\n\n if required is not None:\n self.required = set(required)\n if additional_definitions is not None:\n self.additional_definitions = additional_definitions", "def init_tkvars(self):\n\n for key in self.defaultprefs:\n value = self.defaultprefs[key]\n if type(value) is types.IntType:\n var = self.__dict__[key] = IntVar()\n elif type(value) is types.StringType:\n var = self.__dict__[key] = StringVar()\n var.set(value)\n\n self.resnum = IntVar()\n self.resnum.set(1)\n # Method for calculating Tm of primers\n self.Tm_method = StringVar()\n self.Tm_method.set('Stratagene')\n if 'Darwin' in self.currplatform:\n self.seqfontsize.set(16)\n else:\n self.seqfontsize.set(14)\n return", "def load_auditory_model_vars(self, sess):\n self.sess = sess\n for network_key in sorted(self.config_recognition_networks.keys()):\n fn_ckpt = self.config_recognition_networks[network_key]['fn_ckpt']\n saver0 = self.config_recognition_networks[network_key]['saver0']\n saver1 = self.config_recognition_networks[network_key]['saver1']\n print('Loading `{}` variables from {}'.format(network_key, fn_ckpt))\n saver0.restore(self.sess, fn_ckpt)\n saver1.restore(self.sess, fn_ckpt)\n self.vars_loaded = True", "def declare_variables(self):\n\n\t\tvar_prefixes = ['W_in', 'W_rnn', 'b_rnn', 'W_out', 'b_out']\n\t\tself.var_dict = {}\n\n\t\twith tf.variable_scope('network'):\n\t\t\tfor p in var_prefixes:\n\t\t\t\tself.var_dict[p] = tf.get_variable(p, initializer=par[p+'_init'])", "def init(X1, Y1, X2, Y2):\n\n global X1_train\n global Y1_train\n global X2_train\n global Y2_train\n \n X1_train, Y1_train, X2_train, Y2_train = X1, Y1, X2, Y2", "def init_env_variables(self):\n self.total_distance_moved = 0.0\n self.current_y_distance = self.get_y_dir_distance_from_start_point(self.start_point)\n self.cart_current_speed = rospy.get_param('/cart_pole_3d/init_cart_vel')", "def _initialize_project_variables(self):\n self.Source = ''\n self.Regional = ''\n self.Vernacular = ''\n self.Fallback = dict()\n self.New_Target = dict()\n self.Biblical_Terms = dict()\n self.Old_Target = dict()\n\n# self.list_projects = []\n# self.project_lines = []\n# self.indent = 0\n# self.Treed = False\n self.root = etree.Element('root')\n# #add child 'settings', all user configurable bits under here\n self.settings = etree.SubElement(self.root, \"settings\")\n# self.old_mode = dict()\n# self.spreferred = etree.SubElement(self.settings, \"preferred\")\n# self.smode = etree.SubElement(self.settings, \"mode\")\n# self.stemp = etree.SubElement(self.settings, \"template\")\n self.sf0 = etree.SubElement(self.settings, \"f0\")\n self.sf1 = etree.SubElement(self.settings, \"f1\")\n self.sf2 = etree.SubElement(self.settings, \"f2\")\n self.trout = etree.SubElement(self.root, \"tree\")", "def set_variables(self, g_t, m_t):\n self.g_t, self.m_t = g_t, m_t\n return", "def _setup_params(self) -> None:\n self.i = 0 # Year\n self.ela = self.ela_start # Equilibrium line altitude\n self.steady_state = False # Control variable for steady state\n self.fracd8_mode = \"limited\" # Mode of the fracd8 algorithm", "def _initialize_variables(self, finetune: str=None, **kwargs) -> None:\n if finetune is None:\n super()._initialize_variables(**kwargs) # default initialization\n else:\n self._saver = tf.train.Saver(max_to_keep=100000000)\n logging.info('Restoring variables from `%s`', finetune)\n self._saver.restore(self.session, finetune)", 
"def declareCoreVariables(self, tr, isMC):\n tr.var('run', int, storageType=\"i\")\n tr.var('lumi', int, storageType=\"i\")\n tr.var('evt', int, storageType=\"l\")\n tr.var('isData', int)\n\n # self.triggerBitCheckers = []\n # if hasattr(self.cfg_ana, 'triggerBits'):\n # for T, TL in self.cfg_ana.triggerBits.items():\n # trigVec = ROOT.vector(ROOT.string)()\n # for TP in TL:\n # trigVec.push_back(TP)\n # tr.var( 'HLT_'+T, int )\n# self.triggerBitCheckers.append( (T, TriggerBitChecker(trigVec)) )\n\n if not isMC:\n tr.var('intLumi', int, storageType=\"i\")\n\n if isMC:\n ## cross section\n tr.var('xsec', float)\n ## PU weights\n tr.var(\"puWeight\")\n ## number of true interactions\n tr.var(\"nTrueInt\")\n ## generator weight\n tr.var(\"genWeight\")\n ## PDF weights\n self.pdfWeights = []\n if hasattr(self.cfg_ana, \"PDFWeights\") and len(self.cfg_ana.PDFWeights) > 0:\n self.pdfWeights = self.cfg_ana.PDFWeights\n for (pdf,nvals) in self.pdfWeights:\n if self.scalar:\n for i in range(nvals): tr.var('pdfWeight_%s_%d' % (pdf,i))\n else:\n tr.vector('pdfWeight_%s' % pdf, nvals)", "def _localSetState(self,pdict):\n self.workingDir = pdict.pop('workingDir')\n self.dataFilename = pdict.pop('dataFilename')\n self.functionID = pdict.pop('functionID')\n self.functionType = pdict.pop('functionType')\n self.variableID = pdict.pop('variableID')\n self.k = pdict.pop('k')\n self.s = pdict.pop('s')", "def init():", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def test_load_response_descriptor_variables_library_variable_set_library_variable_set_resource(self):\n pass", "def load_data(self,do_print=True,keys_to_load=[]):\n \n if do_print:\n print\n print 'Loading corr_space data, Id = %s'%self.id\n\n\n data= np.load(self.dataPath,mmap_mode='r')\n \n loaded_keys=[]\n \n if len(keys_to_load)==0:\n for k,v in data.items():\n setattr(self,k,v)\n loaded_keys.append(k)\n else: \n for k in keys_to_load:\n setattr(self,k,data[k])\n loaded_keys.append(k)\n\n \n if do_print:\n print 'Loaded variables: '+' '.join(loaded_keys)", "def load_version_information() -> None:\n to_update = {\"VERSION_MAJOR\", \"VERSION_MINOR\", \"VERSION_PATCH\", \"VERSION_SUFFIX\"}\n with VERSION_FILE.open(\"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n name, _, value = line.strip().partition(\"=\")\n # Don't overwrite random variables by trusting an external file.\n var = name.strip()\n if var in to_update:\n globals()[var] = value.strip()", "def set_variable_values(self, vars_values):\n raise NotImplementedError()", "def __init__(self):\n self.variables = [] # List of all variables in certain scope.\n self.field_id = 0 # Id of next field varibale.\n self.argumen_id = 0 # Id of next argument variable.\n self.local_id = 0 # Id of next local variable.\n self.static_id = 0 # Id of next static variable.", "def _load_state(self, state):\n self._array, self._turn, self._score = state", "def init_game_setting(self):\r\n pass", "def __init__(self):\n self.__dict__ = dict()\n self.load()", "def _training_vars(self):\n self.vars = dict()\n # Temperature params\n self.vars['TInit'] = -1\n self.vars['TDecayRate'] = 0.05\n # Bowl params\n self.vars['q_init'] = 16.58 # initial strength for the bowl\n self.vars['q_max'] = 150.\n #self.vars['q_rate'] = 10.\n # Check if we can improve learning, adjusting this value\n self.vars['bowl_center'] = 0.4\n self.vars['bowl_strength'] = None\n self.vars['beta_min_offset'] = 2\n 
# Time step params\n self.vars['max_dt'] = 0.01\n self.vars['min_dt'] = 0.0005\n self.vars['dt'] = 0.009\n # Training traces\n self.vars['prev_s'] = None\n self.vars['Harmony_trace'] = None\n self.vars['speed_trace'] = None\n self.vars['ema_trace'] = None\n self.vars['lambda_trace'] = None\n self.vars['temp_trace'] = None\n self.vars['TP_trace'] = None\n self.vars['TPnum_trace'] = None\n self.vars['TP_h_trace'] = None\n self.vars['TP_dist_trace'] = None\n self.vars['S_trace'] = None\n\n if self.custom_settings is not None:\n for key, value in self.custom_settings.items():\n if key in self.vars:\n self.vars[key] = value", "def load_trainable_variables (self, sess, savefn):\r\n self.state = utils.train.load_trainable_variables(sess, savefn)", "def _setup(self):\n\n # Get user data\n self.symbols = self._get_symbols()\n self.data_dict = self._get_data()\n self.portfolio = self.initialize_portfolio()\n\n if 'slippage' in self.portfolio:\n self.slippage = self.portfolio['slippage']\n else:\n self.slippage = None\n\n # Keep track of all trades\n self.trade_manager = TradeManager(\n self.symbols, self.portfolio, self.sql_config\n )\n\n # Initialize state variables that are updated each iteration\n self.date = None\n self.data = None\n self.symbol = None\n self.currency = None\n self.last_buy = None\n self.num_unresolved = 0\n self.unresolved_trade = False", "def load_settings(self):\n\n self.std = settings.settings", "def test_variables_get(self):\n pass", "def load():\n\n global R, P, NP, update, update_available, region_dict\n\n loader = GoSmartParameterLoader(gosmart._prefix)\n loader.initiate()\n\n R = loader.get_regions()\n P, NP = loader.get_parameters()\n\n region_dict = loader.get_region_dict()\n\n update = gosmart.status.StatusUpdater()\n update_available = update.connect()", "def _initialize_local_and_global_variables(self):\n variables_initialization_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n self.sess.run(variables_initialization_op)", "def load(self):\n self.gui.save_params()\n self.init_text.delete('1.0', END)\n self.init_text.insert('1.0', open(\n os.path.join(self.gui.lnp.init_dir, 'init.txt')).read())\n self.d_init_text.delete('1.0', END)\n self.d_init_text.insert('1.0', open(\n os.path.join(self.gui.lnp.init_dir, 'd_init.txt')).read())", "def initialize_dynamic_settings(self):\n self.ship_speed_factor = 1.5\n self.bullet_speed_factor = 3\n self.alien_speed_factor = 1\n self.fleet_direction = 1\n #Puntos\n self.alien_points = 50", "def __init__(self):\n self.posRev = dict()\n self.negRev = dict()\n a = False\n b = False\n if os.path.isfile(\"posRev\"):\n print os.path.isfile(\"posRev\")\n a = self.load(\"posRev\")\n if os.path.isfile(\"negRev\"):\n b = self.load(\"negRev\")\n if a and b:\n self.posRev = a\n self.negRev = b\n else:\n self.train()", "def setup(self):\n self.build_serverkeyhash()\n self.build_agent_pubkey()\n self.load_registration_key()", "def _setup(self):", "def _setup(self):", "def __init__(self):\n\n self.logger = utils.get_logger()\n\n # set constants\n constants = models.get_asset_dicts('preferences')\n for key, value in constants.items():\n setattr(self, key, value)", "def setup(self):\n\n for name, infos in Rt.geom_dict.items():\n if name in Rt.optim_var_dict:\n self.add_input(name, val=infos[1][0])", "def _manual_setup(self):\n # If self.cache is None, then all caching should be skipped\n if self.name_to_index_dict is None:\n self.name_to_index_dict = {name:i for i, name in enumerate(self.input_fields)}\n 
self.task_label_index = self.name_to_index_dict[self.task_name]\n if self.cache is not None:\n self.setup_cache()", "def _setup(self) -> None:\n\t\treturn", "def initialize():\n # Ensure user config exists\n install(CONFIG_PATH)\n\n # Load preferences into memory\n get_config()", "def load_settings_from_cli():\n load_user_from_cli()\n load_local_contacts()", "def testTurntableVariables(self):\n crawler = Crawler.create(PathHolder(self.__exrFile))\n self.assertEqual(crawler.var(\"type\"), \"turntable\")\n self.assertEqual(crawler.var(\"category\"), \"render\")\n self.assertEqual(crawler.var(\"renderType\"), \"tt\")\n self.assertEqual(crawler.var(\"assetName\"), \"ass\")\n self.assertEqual(crawler.var(\"step\"), \"lookdev\")\n self.assertEqual(crawler.var(\"pass\"), \"beauty\")\n self.assertEqual(crawler.var(\"renderName\"), \"ass-default-beauty\")", "def setupVariables(self, file, variables, wordsize):\n file.write(self.getStringForVariables(variables, wordsize) + '\\n')\n return", "def afterSetUp(self):\n self.load_config = {}\n self.load_config['monitor_interval'] = 1\n self.load_config['limit_number_request'] = 100\n self.load_config['limit_memory_used'] = 500", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def initialise(self):", "def load_init(self):\n\t\tself.init_et = ET.parse(self.init_in_fn)", "def __init__(self):\n self.statiFile = \"\"\n self.printOrder = []\n self.instCount = 0\n self.initializedVars = {\"GF\":[],\"TF\":[],\"LF\":[]}", "def init_config():\n global udata\n udata = UserConfig()", "def initialize_from_config(self):", "def set_params():\n global module \n global ora_inst\n global response_loc\n\n module_args=dict(\n ora_inst=dict(type='str', required=True),\n response_loc=dict(type='str', required=True)\n )\n\n module=AnsibleModule(\n argument_spec=module_args\n )\n\n ora_inst = module.params['ora_inst']\n response_loc = module.params['response_loc']", "def initVariables(self):\n lines = self.getLines()\n self.__numberOfWords = 0\n wordsSet = set()\n for line in lines:\n self.__numberOfWords += line.count(' ') - 2\n for word in line.split(' ')[0:-2]:\n wordsSet.add(word)\n self.__nbOfLines = len(lines)\n self.__nbUniqueWords = len(wordsSet)\n\n # get time\n anaPath = self.__father.getpath() + self.__father.getidFile() + \".ANA\"\n # if there is no time info for the file\n if not os.path.isfile(anaPath):\n self.__duration = 500 # we put a value not to disturb the mean too much\n return\n f = open(anaPath, \"r\")\n lines = f.readlines()\n self.__duration = math.floor(\n float(lines[-1].split(' ')[6].split('-')[0]) -\n float(lines[0].split(' ')[6].split('-')[1]))/float(10000)\n if self.__duration < 0:\n print(self.__father.getpath() + self.__father.getidFile())", "def _init_storage(self):\n if self._ is None:\n self._ = Parameters(self)", "def initialize(self):\n my_setting = self.settings.get('my_setting')", "def vars_ifile(ifile):\n\n site = None\n year = None\n actual = None\n doy = None\n Ndays = None\n params = None\n Nsteps = None\n models = None\n resolution = None\n fopt = None\n window = None\n tag = None\n photo = None\n plot = None\n project = None\n\n with open(ifile, 'r') as f:\n\n for line in f:\n\n ll = line.strip()\n\n if not ll.startswith(\"#\"):\n ll = ll.rstrip()\n\n if site is None:\n site = read_var('site', ll)\n\n if year is None:\n year = read_var('year', ll)\n\n if actual is None:\n actual = read_var('actual', ll)\n\n if doy is None:\n doy = read_var('doy', ll)\n\n if Ndays is None:\n Ndays = 
read_var('Ndays', ll)\n\n if params is None:\n params = read_var('params', ll)\n\n if Nsteps is None:\n Nsteps = read_var('Nsteps', ll)\n\n if models is None:\n models = read_var('models', ll)\n\n if resolution is None:\n resolution = read_var('resolution', ll)\n\n if fopt is None:\n fopt = read_var('fopt', ll)\n\n if window is None:\n window = read_var('window', ll)\n\n if tag is None:\n tag = read_var('tag', ll)\n\n if photo is None:\n photo = read_var('photo', ll)\n\n if plot is None:\n plot = read_var('plot', ll)\n\n if project is None:\n project = read_var('project', ll)\n\n if actual is None: # make sure the spinup only runs for the Control\n models = 'Control'\n\n return (site, year, actual, doy, Ndays, params, Nsteps, models, resolution,\n fopt, window, tag, photo, plot, project)", "def test_load_response_descriptor_variables_library_variable_set_library_variable_set_resource_spaces(self):\n pass", "def set_variables(self, scenario, bodies):\n\n if self.tacs_proc:\n # Set the design variable values on the processors that\n # have an instance of TACSAssembler.\n xvec = self.assembler.createDesignVec()\n self.assembler.getDesignVars(xvec)\n xarray = xvec.getArray()\n\n # This assumes that the TACS variables are not distributed and are set\n # only on the tacs_comm root processor.\n if self.tacs_comm.rank == 0:\n for i, var in enumerate(self.struct_variables):\n xarray[i] = var.value\n\n self.assembler.setDesignVars(xvec)\n\n return", "def _import(self, datadict):\n self.GUID = datadict.get(\"GUID\", uuid.uuid1())\n self.FileName = datadict.get(\"FileName\", \"\")\n self.Name = datadict.get(\"Name\", \"\")\n self.Projects = datadict.get(\"Projects\", [])\n self.VSVersion = datadict.get(\"VSVersion\", None)", "def __init__(self):\n # Select all the environment variables starting with 'ASH_CFG_' and strip\n # off the leading ASH_CFG_ portion to use as the name of the variable.\n self.variables = dict(\n [(x[8:], y) for x, y in os.environ.items() if x.startswith('ASH_CFG_')]\n )", "def set_vars(game_data):\n if team in game_data['home_file_code']:\n we_are = \"home\"\n they_are = \"away\"\n else:\n we_are = \"away\"\n they_are = \"home\"\n\n try:\n our_score = game_data[we_are + \"_team_runs\"]\n their_score = game_data[they_are + \"_team_runs\"]\n except:\n our_score = None\n their_score = None\n\n opponent = game_data[they_are +'_team_name']\n venue = game_data['venue']\n\n return opponent, our_score, their_score, venue", "def initialize_dynamic_settings(self):\n self.ship_speed_factor = 1.5\n self.bullet_speed_factor = 3\n self.alien_speed_factor = 1\n self.fleet_direction = 1", "def __init__(self):\n load_dotenv('.env')\n self.NEWS_API_KEY = os.getenv('NEWS_API_KEY')", "def initialize(self, context):\n self.initialized = True\n properties = context.system_properties\n # Contains the url parameter passed to the load request\n model_dir = properties.get(\"model_dir\") \n gpu_id = properties.get(\"gpu_id\")\n\n # Load Gluonts Model\n self.mx_model = self.load_model(model_dir)", "def do_load_environment(self, *arg):\n print(\"Loading sensors\")\n self.environment = ArduinoSerialMonitor(auto_detect=False)\n self.do_enable_sensor('environment', delay=1)", "def initialise(self):\n self.sc.init.exec_action(self.variables)" ]
[ "0.6802117", "0.67515796", "0.6646807", "0.663246", "0.66175544", "0.64861125", "0.6454291", "0.64139426", "0.63909316", "0.63547635", "0.6338879", "0.62177134", "0.61795104", "0.61608815", "0.6121431", "0.6116283", "0.60934865", "0.60814786", "0.60814786", "0.60814786", "0.60814786", "0.60814786", "0.60814786", "0.6033905", "0.60233176", "0.6008881", "0.59357005", "0.59314585", "0.59190637", "0.5895204", "0.5858852", "0.5858223", "0.58577764", "0.58402777", "0.58339584", "0.58163416", "0.5816253", "0.5811229", "0.58085185", "0.5797447", "0.5778018", "0.5745", "0.57447946", "0.5739783", "0.57376826", "0.5728459", "0.5710173", "0.570863", "0.5680082", "0.56794477", "0.56731117", "0.5672814", "0.56703717", "0.5662487", "0.56596863", "0.5650822", "0.56399304", "0.5635916", "0.5632202", "0.562783", "0.56192786", "0.561836", "0.5610036", "0.5607307", "0.5597616", "0.5592975", "0.55895084", "0.55848074", "0.5573824", "0.5573824", "0.55649865", "0.55645466", "0.55616796", "0.5553054", "0.55406713", "0.5539064", "0.5530891", "0.55214506", "0.55183214", "0.551339", "0.5503427", "0.5495355", "0.54936737", "0.54927826", "0.54851145", "0.54815024", "0.54792017", "0.54747385", "0.5471878", "0.54698753", "0.54653746", "0.54639626", "0.54636526", "0.5463356", "0.54600304", "0.5458851", "0.5457645", "0.5457285", "0.5455575", "0.54526407" ]
0.5848839
33
Sets up biosafe and stores it as an object variable.
def setup_biosafe(self): # Generate dummy data in the right format species_presence = pd.DataFrame( np.random.randint(2, size=len(self.links_law)), columns=['speciesPresence'], index=self.links_law.index) ecotope_area = pd.DataFrame( np.ones(len(self.links_eco2.columns)-1) * 1e5, columns = ['area_m2'], index = self.links_eco2.columns.values[0:-1]) # Simplify ecotope tables to VR ecotopes unique_eco = np.unique( np.hstack((self.vr_eco.ecotope1.values, self.vr_eco.ecotope2.values))) links_eco3 = self.links_eco2.reindex(columns=unique_eco) ecotope_area = ecotope_area.reindex(index=unique_eco) # Run a first version of Biosafe self.bsf_model = bsf.biosafe( self.legal_weights, self.links_law, links_eco3, species_presence, ecotope_area) #PotTax = self.bsf_model.TFI() #PotAll = self.bsf_model.FI() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self):\n self.ae = None", "def __init__(self, barcamp, handler):\n self.barcamp = barcamp\n self.handler = handler\n self.app = self.handler.app\n self.config = self.handler.app.config\n self.user = self.handler.user", "def setup():\n global zb\n # Signal handler (Ctrl+C exit)\n signal.signal(signal.SIGINT, signal_handler) \n # DBus\n session_bus = dbus.SessionBus()\n objXBZB = session_bus.get_object(PROTOCOL_BUS_NAME, PROTOCOL_OBJ_PATH + \"/\" + XBEE_ZB + \"/\" + SOCKET0)\n zb = dbus.Interface(objXBZB, dbus_interface=PROTOCOL_BUS_NAME)", "def basetype_setup(self):\n # the text encoding to use.\n self.db.encoding = \"utf-8\"\n # A basic security setup\n lockstring = \"examine:perm(Wizards);edit:perm(Wizards);delete:perm(Wizards);boot:perm(Wizards);msg:false()\"\n self.locks.add(lockstring)\n # set the basics of being a bot\n self.cmdset.add_default(BotCmdSet)\n script_key = \"%s\" % self.key\n self.scripts.add(BotStarter, key=script_key)\n self.is_bot = True", "def setup(self):\n self.config = pau.IConfig\n self.session = pau.ISession\n pau.resolve(self)\n\n self.session.assets = Assets()\n self.config.db = self.db_name\n\n self.db = pau.IDb\n pau.resolve(self)\n\n # Instance\n i = Setup()\n pau.resolve(i)\n return i", "def __init__(self):\n # Get a weboob instance\n self.weboob = Weboob()\n self.backend = None", "def setup(client):\n\n client.add_cog(Faq(client))\n print(\"\\tLoaded Faq cog!\")", "def init(self):\n # IMPORTANT: create a new gob database model entry for this object\n self.gobify()", "def _setup(app_obj):", "def init():", "def setup( self ):", "def memb_init(self):\n self.initialize()", "def setup(self):\n self.machine = Machine(['a', 'b', 'c', '_'])", "def boot(self):\n\n pass", "def init(self, sevabot):\n\n logger.debug(\"GiantbombHandler init\")\n self.sevabot = sevabot\n self.skype = sevabot.getSkype()", "def initialise(self):\n self.set_up()", "def setUp(self):\n self.family = Family()\n self.decoder = Decoder()\n self.data1 = ['Atya', 'Sister-In-Law']\n self.data2 = ['Satya', 'Ketu', 'Male']", "def __init__(self):\n self.load()", "def setup(app):\n # Register builder.\n app.add_builder(BeamerBuilder)\n\n # Add setting for allowframebreaks.\n app.add_config_value(\"beamer_allowframebreaks\", True, \"beamer\")\n # Add setting for Beamer theme.\n app.add_config_value(\"beamer_theme\", \"Warsaw\", \"beamer\")\n # Adjust titles upon doctree-resolved.\n app.connect(\"doctree-resolved\", adjust_titles)\n\n return {\n \"version\": \"1.0\",\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config", "def __init__(self):\n pygame.init()\n\n self.settings = Settings()\n\n self.screen = pygame.display.set_mode(\n (self.settings.screen_width, self.settings.screen_height))\n pygame.display.set_caption(\"52 Card Trick\")\n self.CardSet = CardSet(self)", "def __init_euca(self):\n if self.euca:\n return\n self.euca = Euca2ool()", "def setup_game(self):", "def _setup(self) -> None:\n\t\treturn", "def _init_objects(self) -> None:\n self.position = selectors.get_position(self.exchange, self.symbol)\n self.broker = Broker(self.position, self.exchange, self.symbol, self.timeframe)\n\n if self.hp is None and len(self.hyperparameters()) > 0:\n self.hp = {}\n for dna in self.hyperparameters():\n self.hp[dna['name']] = dna['default']", "async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n 
self.app = aioweb.Application()\n self.runner = aioweb.AppRunner(self.app)", "def setup(self) -> None:", "def init():\n try:\n compile_contract(\n \"fishcake\", f\"Fishcake(sp.address('{pub_key_hash}'),{default_supply})\")\n fishcake_addr = deploy(\"fishcake\")\n print(\"\\n\")\n compile_contract(\n \"fishcakeBox\", f\"FishcakeBox({default_redeem_amt}, sp.address('{fishcake_addr}'))\")\n fishcake_box_addr = deploy(\"fishcakeBox\")\n setup(fishcake_addr, fishcake_box_addr)\n print(\"\\n\\n[!] Details :\\n\")\n print(f\"-- Fishcake Token Address : {fishcake_addr}\")\n print(f\"-- Fishcake Box Address : {fishcake_box_addr}\")\n except Exception as e:\n print(\"Failed to originate Contracts : \", e)", "def setUpClass(cls):\n cls.user = User()\n cls.user.first_name = \"Kev\"\n cls.user.last_name = \"Yo\"\n cls.user.email = \"1234@yahoo.com\"\n cls.storage = FileStorage()\n cls.console = HBNBCommand()", "def __init__(self, temboo_session):\n super(Image, self).__init__(temboo_session, '/Library/Freebase/Image')", "def init_bounce(self, bounce):\n self.can_bounce = bounce", "def startup(self) -> None:", "def __init__(self, *args, offset=0, **kwargs):\n super(BF, self).__init__(*args,\n arch=arch_from_id(\"bf\"),\n offset=offset,\n entry_point=0,\n **kwargs)\n self.os = \"bf\"", "def __initialize(self):\n self.__object = None\n \n self.__mainAct = None\n self.__mainMenu = None\n \n self.__e5project = e5App().getObject(\"Project\")\n \n self.__supportedVariants = []", "def init():\n safe_call(backend.get().af_init())", "def setUp(self):\n self.box = Box({}, 'storehouse_test', 'box')", "def __init__(self):\n self.setup_called = False", "def _setup(self):", "def _setup(self):", "def set_bomb(self):\n self.bomba = True", "def setup(self):\n # Set up the player\n self.player_sprite = arcade.Sprite(\"Sprites/Jugador/Jugador.jpg\", SPRITE_SCALING)\n self.player_sprite.center_x = 100\n self.player_sprite.center_y = 100\n self.player_list = arcade.SpriteList()\n self.player_list.append(self.player_sprite)\n\n # Listado de habitaciones\n self.rooms = []\n self.rooms.append(setup_pueblo())\n\n #Contador de habitación\n self.current_room = 0\n\n #Fisicas\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)", "def __init__(self):\n self._book = Library.functions.new_()", "def __init__(self,exitOnError=True):\n self.femb = None\n self.NASICS = 1\n self.NBOARDS = 1\n self.COLD = False\n self.exitOnError=exitOnError", "def _swift_saio_setup(self):\n self._swift_storage_setup()\n self._swift_proxy_setup()", "def setup(self):\n\t\tpass", "def __init__(self, c):\n from modules.Helpers import Helpers\n from modules.Weaviate import Weaviate\n self.config = c\n self.helpers = Helpers(c)\n self.weaviate = Weaviate(c)", "def setup_object(obj):\n for key, conf, value in obj.get_config_vars():\n obj[key] = raw_input_default_config(conf, default=value, obj=obj)", "def do_init(self):\n\n pass", "def __init_on_load__(self):", "def __init__(self):\r\n\t\t# Publishers\r\n\t\tself._pub_rate = rospy.Publisher('robot/joint_state_publish_rate', UInt16, queue_size=10)\r\n\t\tself.image_pub = rospy.Publisher(\"baxter_view\",Image,queue_size=4)\r\n\t\tself._obj_state = rospy.ServiceProxy(\"/gazebo/set_model_state\",SetModelState)\r\n\t\t\r\n\t\t# Link with baxter interface\r\n\t\tself._left_arm = baxter_interface.limb.Limb(\"left\")\r\n\t\tself._right_arm = baxter_interface.limb.Limb(\"right\")\r\n\t\tself._left_joint_names = self._left_arm.joint_names()\r\n\t\tself.grip_left 
= baxter_interface.Gripper('left', CHECK_VERSION)\r\n\r\n\t\tprint(\"Getting robot state... \")\r\n\t\tself._rs = baxter_interface.RobotEnable(CHECK_VERSION)\r\n\t\tself._init_state = self._rs.state().enabled\r\n\t\tprint(\"Enabling robot... \")\r\n\t\tself._rs.enable()\r\n\t\t\r\n\t\t# Control parameters\r\n\t\tself._rate = 500.0 # Hz\r\n\t\tself._pub_rate.publish(self._rate)\r\n\t\tself.bridge = CvBridge()\r\n\t\tself._left_arm.set_joint_position_speed(0.3)\r\n\t\tself._object_type = 0\r\n\t\tself.object_position = Point(x=0.0, y=0.0, z=0.0)\r\n\t\tself.object_v = 0.0", "def user_init(self):\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def _setup_object(self) -> object:\n raise NotImplementedError", "def __init__(self):\n self.read_input()\n self.update_binaries()", "def on_load(self):\n self.__init__()", "def init(self):", "def init(self):", "def setup(self):\r\n pass", "def init(self) -> None:", "def initialize(self):\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.initialize, just copy the code over\n\n # --------- END YOUR CODE ----------\n pass", "def setUp(self):\n self.business_item_class = BusinessesClass()", "def _set_kb_attrs(flush_all=True):\n\tdebug_msg = \"initializing the knowledge base\"\n\tlogger.debug(debug_msg)\n\n\tkb.apk = AttribDict()\n\tkb.apk.md5 = None\n\tkb.apk.file_size = None\n\tkb.apk.display_perm = []\n\tkb.apk.providers = []\n\tkb.apk.receivers = []\n\tkb.apk.services = []\n\tkb.apk.activities = []\n\tkb.apk.actions = []\n\tkb.apk.manifest = None\n\t\n\tif flush_all:\n\t\tkb.cache = AttribDict()\n\t\tkb.cache.regex = {}\n\t\tkb.cache.files = {}\n\t\tkb.targets = set()\n\t\tkb.heartbeat = None\n\t\tkb.storage = None\n\t\tkb.plugins = AttribDict()\n\t\tkb.plugins.handle = []", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self):\n self.bag = {}", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):", "def init():\n pass", "def __init__():\n self.placa = placa", "def __init__(self):\n self.__dict__ = dict()\n self.load()", "def init(self):\n\n pygame.init()\n pygame.display.set_mode((640, 480))\n pygame.display.set_caption(\"Gears 4 Geeks\")\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.ser = serial.Serial('COM4', 9600)\n\n #ADAFRUIT_IO_KEY = 'd1a1bd3737714fa488e0364c775a4b4d' ##This will only be good until the end of the competition\n #self.aio = Client(ADAFRUIT_IO_KEY)", "def initialise(self):", "def store_biosafe_output(self, data, reference=False, percentage=False):\n if percentage:\n self.biosafe_percentage = data\n elif reference:\n self.biosafe_reference = data\n else:\n self.biosafe_intervention = data\n return", "def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n 
\"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n self.endpoint_url = f\"https://westus.{mf.FACE_API_URL}\"", "def startup(self):\n pass", "def setUp(self):\n self.display = StubDisplay()", "def __init__(self):\n self.cap = None\n self.frm = None", "def setup(self):\n pass" ]
[ "0.56603616", "0.5621632", "0.55927515", "0.5504273", "0.53797036", "0.5317156", "0.5243224", "0.52396697", "0.52148014", "0.5166802", "0.51143235", "0.51060176", "0.50999767", "0.50967854", "0.50958705", "0.50950307", "0.50940466", "0.5094038", "0.50789654", "0.5071926", "0.50698096", "0.5066861", "0.5062106", "0.5058001", "0.50500965", "0.50406635", "0.50365543", "0.5031632", "0.5030729", "0.50294065", "0.5025457", "0.50034136", "0.49989155", "0.49969935", "0.49916312", "0.498001", "0.497721", "0.4975477", "0.4975477", "0.49659827", "0.49528024", "0.49438155", "0.493601", "0.49324113", "0.49282587", "0.49212098", "0.49190113", "0.49173766", "0.49156547", "0.4915378", "0.4914943", "0.491337", "0.491337", "0.491337", "0.49111193", "0.48942223", "0.48902938", "0.48854312", "0.48854312", "0.48836756", "0.48790395", "0.4878103", "0.48717627", "0.48690772", "0.48685563", "0.48685563", "0.48685563", "0.48685563", "0.48685563", "0.48685563", "0.48685563", "0.48685563", "0.48618948", "0.48594782", "0.48594782", "0.48594782", "0.48594782", "0.48594782", "0.48594782", "0.48594782", "0.48594782", "0.48594782", "0.48594782", "0.48594782", "0.48594782", "0.48556927", "0.48556927", "0.48556927", "0.48556927", "0.48407987", "0.48404512", "0.48264006", "0.48260528", "0.48248824", "0.482317", "0.48187596", "0.4817431", "0.4817228", "0.48167574", "0.48157528" ]
0.6278834
0
Calculate the total area of all ecotopes on the playing board.
def ecotope_area_sums(self, board): # clean up the input and merge into a single dataframe cols = ['geometry', 'z_reference', 'landuse', 'biosafe'] board_clean = board.loc[board.biosafe, cols] board_eco = pd.merge(board_clean, self.vr_eco, on=['z_reference', 'landuse']) # optional: output gdf to shp # gdf = board_eco.copy() # gdf['biosafe'] = gdf.biosafe.values.astype('int') # gdf.to_file('board_eco.shp') # calculate the total area of all columns # note: landuse-z_reference combinations not in vr_ecotopes are # excluded area_eco1 = board_eco.groupby('ecotope1').sum() area_eco2 = board_eco.groupby('ecotope2').sum() area_fractions = pd.concat([area_eco1.fraction1, area_eco2.fraction2], axis=1, sort=True) area_total = area_fractions.fillna(0).sum(axis=1).reset_index() area_total.columns = ['ecotope', 'area_m2'] # assert that that total area of the ecotopes matches the biosafe # hexagons try: assert int(area_total.sum().area_m2) == int(board_clean.shape[0]),\ ("ERROR: There appears to be one or more polygons that is not " + "detected correctly, resulting in a missmatch of the VR ecotopes") except AssertionError as error: print(error) pass area_out = area_total.set_index('ecotope') area_out.index.name=None return area_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def total_area(self) :\n area = 0\n for i in self.residues :\n area += i.solvent_acc_area\n return area", "def calculatearea(self):\r\n return self.width * self.height", "def total_area(self):\n return numpy.prod([r[1] - r[0] for r in self.range_])", "def calculate_area(building, pixel_size=1):\n return len(building.points) * (pixel_size**2)", "def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())", "def area(self):\n num_rows = self.row_end - self.row_start\n num_cols = self.col_end - self.col_start\n area = num_rows*num_cols\n return area", "def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))", "def total_area(self):\n return self._total_area", "def area(self):\n area = self.__size * self.__size\n return(area)", "def area(self):\n area = self.__size * self.__size\n return area", "def area(self):\n\t\treturn self.height * self.height", "def area(self):\n return self.__size ** 2", "def getArea(self):\r\n return np.sum(self.array[:])", "def area(self):\n\t\treturn self.width * self.height", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size ** 2", "def area(self):\n\t\treturn self.width() * self.height()", "def area(self, by_spec=False):\n if by_spec:\n cell_area = {}\n for element in self.elements:\n element_area = element.area(True)\n for ll in element_area.keys():\n if ll in cell_area:\n cell_area[ll] += element_area[ll]\n else:\n cell_area[ll] = element_area[ll]\n else:\n cell_area = 0\n for element in self.elements:\n cell_area += element.area()\n return cell_area", "def area(self):\n return self.__size * self.__size", "def area(self):\n return self.__size * self.__size", "def area(self):\n return self.__size * self.__size", "def area(self):\n return self.__size * self.__size", "def area(self) -> npt.NDArray[np.float_]:\n return np.sum(self.faces.area)", "def area(self):\n return (self.__size ** 2)", "def area(self):\n return (self.__size ** 2)", "def area(self):\n return (self.__size ** 2)", "def area(self):\n return self.width*self.height", "def area(self):\n return int(self.__size) * int(self.__size)", "def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)", "def area(self):\n return(self.__width * self.__height)", "def area(self):\n area = self.__width * self.__height\n return area", "def area(self):\n return self._width * self._height", "def calculate(self):\n\n 
return self._calculate_area(self.ground_truth, self.slice_number)", "def area(self):\n return (self.width * self.height)", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return (self.__size * self.__size)", "def area(self):\n return (self.__size * self.__size)", "def area(self):\n return (self.__size * self.__size)", "def area(self):\n return self.width() * self.height()", "def area(self):\n return self.__height * self.__width", "def area(self):\n return self.__height * self.__width", "def area(self):\n return self.__height * self.__width", "def area(self):\n return self.length*self.length", "def get_artif_area(self):\n result = self.cities.all().aggregate(total=Sum(\"surface_artif\"))\n return result[\"total\"] or 0", "def area(self):\r\n return self.width * self.height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r", "def compute_area(boxes):\n area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n return area", "def area(self):\n return numpy.prod(\n numpy.meshgrid(*self.binwidths, indexing='ij'), axis=0)", "def area(self):\n return (self.__width * self.__height)", "def area(self):\n return (self.__width * self.__height)", "def findArea(self):\n\n a, b = self.sides\n area = a * b\n print(f\"Are of rectangle is: {area}\")", "def area(self):\n\n return self.__width * self.__height", "def area(self):\n\n return self.__width * self.__height", "def area(self):\n\n return self.__width * self.__height", "def area(self) -> float:\n raise NotImplementedError", "def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def area(self):\n area = self.__length * self.__width\n\n return area", "def free_area_score(game, player, e=2):\n\n y, x = game.get_player_location(player)\n\n spaces = game.get_blank_spaces()\n data = [1 for sy, sx in spaces if (sx - x) + (sy - y) <= e]\n res = sum(data)\n\n return 
res", "def calculate_area(boxes):\n box_dimension = len(boxes.size())\n if (box_dimension == 1) and (boxes.size()[0] != 0):\n return (boxes[3] - boxes[1] + 1) * (boxes[2] - boxes[0] + 1)\n elif box_dimension == 2:\n return (boxes[:, 3] - boxes[:, 1] + 1) * (boxes[:, 2] - boxes[:, 0] + 1)\n else:\n return torch.tensor([])", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def score(self, board: Block) -> int:\n grid = _flatten(board)\n left = grid[0]\n right = grid[-1]\n top = [i[0] for i in grid]\n bottom = [i[-1] for i in grid]\n score0 = left.count(self.colour)\n score1 = right.count(self.colour)\n score2 = top.count(self.colour)\n score3 = bottom.count(self.colour)\n return score0 + score1 + score2 + score3", "def area(self) -> float:\n return cross3(self.b.position - self.a.position,\n self.c.position - self.a.position).length() / 2.0", "def area(self, by_spec=False):\n if by_spec:\n cell_area = {}\n for element in itertools.chain(self.polygons, self.paths, self.references):\n element_area = element.area(True)\n for ll in element_area.keys():\n if ll in cell_area:\n cell_area[ll] += element_area[ll]\n else:\n cell_area[ll] = element_area[ll]\n else:\n cell_area = 0\n for element in itertools.chain(self.polygons, self.paths, self.references):\n cell_area += element.area()\n return cell_area", "def cube_area(edge : number) -> number:\n area = 6*edge*edge\n\n return area", "def island_perimeter(grid):\n total = 0\n for b in range(len(grid)):\n for a in range(len(grid[b])):\n # left corner\n if (a == 0) and (b == 0):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right corner\n elif (a == len(grid[b]) - 1) and b == 0:\n if grid[b][a] == 1:\n total = total + 2\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # lower-left corner\n elif a == 0 and b == (len(grid) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n # lower-right corner\n elif b == (len(grid) - 1) and a == (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # top edge\n elif (b == 0 and a > 0) and a < (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # left edge\n elif (b > 0 and b < (len(grid) - 1)) and ((a == 0) and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right edge\n elif (b > 0 and (b < len(grid) - 1)) and (a == len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # bottom edge\n elif (b == len(grid) - 1) and a > 0 and a < len(grid[b]) - 1:\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # cases that are neither edges nor corners\n elif (b > 0 and b < len(grid) - 1) and (a > 0 and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n if grid[b][a - 1] == 0:\n total = total + 
1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n return total", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def score(self, board: Block) -> int:\r\n score = 0\r\n flat = _flatten(board)\r\n\r\n perimeter = []\r\n perimeter.extend(flat[0][1:-1])\r\n perimeter.extend(flat[-1][1:-1])\r\n for i in range(1, len(flat) - 1):\r\n perimeter.append(flat[i][0])\r\n perimeter.append(flat[i][-1])\r\n\r\n if flat[0][0] == self.colour:\r\n score += 2\r\n if flat[0][-1] == self.colour:\r\n score += 2\r\n if flat[-1][0] == self.colour:\r\n score += 2\r\n if flat[-1][-1] == self.colour:\r\n score += 2\r\n\r\n for element in perimeter:\r\n if element == self.colour:\r\n score += 1\r\n\r\n return score", "def number_total(self):\n return sum(self.grid[pos][1] for pos in [\"n1\", \"n2\", \"n3\", \"n4\", \"n5\", \"n6\"] if self.grid[pos][0])", "def area(self):\n\n return (self.x1 - self.x0) * (self.y1 - self.y0)", "def area(self):\n return self.radius*self.radius*math.pi", "def area(self):\n return 0.5*np.abs(np.dot(self.x,np.roll(self.y,1))-np.dot(self.y,np.roll(self.x,1)))", "def area(self):\n area = 0.25*self._sides*self._length**2 / math.tan(math.radians(180/self._sides))\n return float('{:.2f}'.format(area))", "def surfaceArea(self):\n surfaceArea = self.sideLength**2 * 6\n return surfaceArea", "def cells_total(self):\n return self._inv.get(\"cells\", len(self))", "def area(\n self):\n pi = numpy.pi\n area0 = 4.0 * pi / 8.0\n areadiv = 4.0 ** self.depth\n area = area0 / areadiv * (180.0 / pi) ** 2\n return area", "def area(self):\n return math.pi * self._r ** 2", "def area(self) -> torch.Tensor:\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area", "def area(self):\n return (self.__radius ** 2 * math.pi)", "def area(self):\n return math.pi*self._radius*self._radius", "def getArea(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * math.pow(self.radius, 2)" ]
[ "0.72191346", "0.69285345", "0.6818615", "0.6791541", "0.6719441", "0.6589277", "0.6585048", "0.6579154", "0.65724313", "0.65674067", "0.6542835", "0.651549", "0.6497927", "0.64875937", "0.64872205", "0.6446514", "0.6436132", "0.6436132", "0.6436132", "0.6436132", "0.6428201", "0.6425329", "0.6424015", "0.6424015", "0.6424015", "0.6424015", "0.64212507", "0.64013994", "0.64013994", "0.64013994", "0.6400678", "0.6399945", "0.6398231", "0.6396636", "0.6394234", "0.6382829", "0.6373582", "0.6371132", "0.63665247", "0.63665247", "0.63665247", "0.63665247", "0.63665247", "0.63665247", "0.63665247", "0.63665247", "0.6364283", "0.6364283", "0.6364283", "0.6361426", "0.63549876", "0.63549876", "0.63549876", "0.63503313", "0.63485736", "0.6339642", "0.6338373", "0.6338373", "0.6338373", "0.6338373", "0.6338373", "0.63051856", "0.6300306", "0.6279877", "0.6275589", "0.6275589", "0.6238375", "0.62278914", "0.62278914", "0.62278914", "0.62079465", "0.6203513", "0.61943734", "0.61900896", "0.6189099", "0.6181866", "0.6152864", "0.614875", "0.614656", "0.6108651", "0.6086204", "0.60804343", "0.6054138", "0.6054138", "0.60540605", "0.6033029", "0.60261476", "0.6016985", "0.60159206", "0.6007956", "0.5983828", "0.59752595", "0.59597737", "0.5958772", "0.59586847", "0.5945014", "0.59356594", "0.5934218", "0.5929964", "0.5896248" ]
0.7589144
0
Function that processes the current board (including the initial board at the start of the Virtual River game).
def process_board(self, hexagons, reference=False):
    # Input data Virtual River
    board = gpd.GeoDataFrame.from_features(hexagons.features)
    if reference:
        self.board_reference = board
    else:
        self.board_intervention = board
    # Evaluate the board
    eco_area = self.ecotope_area_sums(board)
    self.bsf_model.ecotopeArea = eco_area
    PotTax = self.bsf_model.TFI()
    if reference:
        self.PotTax_reference = PotTax
    else:
        self.PotTax_intervention = PotTax
    return
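The document above builds a GeoDataFrame from the game's hexagon features and then hands the ecotope bookkeeping to self.ecotope_area_sums and a BSF model, neither of which is shown in this record. Below is a minimal, self-contained sketch of the per-ecotope area summation such a helper could perform; the hexagon features, the 'ecotope' property name, and the coordinates are illustrative assumptions, not part of the dataset.

import geopandas as gpd
from shapely.geometry import Polygon

# Two toy "hexagon" features (squares here, for brevity) tagged with an ecotope label.
features = [
    {"type": "Feature",
     "properties": {"ecotope": "floodplain"},
     "geometry": Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]).__geo_interface__},
    {"type": "Feature",
     "properties": {"ecotope": "side channel"},
     "geometry": Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]).__geo_interface__},
]

board = gpd.GeoDataFrame.from_features(features)        # same call as in the record above
board["area"] = board.geometry.area                      # planar area of each cell
ecotope_area = board.groupby("ecotope")["area"].sum()    # per-ecotope totals fed to the model
print(ecotope_area)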
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initBoard(self):\n pass", "def set_board(board):", "def advance(self, board):", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def main():\n # each square in the board is assigned a label (1a-3c)\n board_values = deepcopy(c.INITIAL_BOARD_VALUES)\n\n print_welcome_message(board_values)\n\n winner = None\n current_player = None\n while winner is None:\n # current player is either \"X\" or \"O\"\n current_player = get_next_player(current_player)\n\n # ask the current player to choose a square\n chosen_square = get_next_move(current_player, board_values)\n\n # update the board, show it, and check for a winner or a full board\n board_values[chosen_square] = current_player\n print_board(board_values)\n winner = get_winner(board_values)\n\n print(get_final_message(winner))", "def initialize_board():\n # Wipe current board\n for x in range(len(THE_BOARD.positions)):\n for y in range(len(THE_BOARD.positions)):\n THE_BOARD.positions[x][y] = ' '\n\n all_pieces = []\n\n # Pawns\n white_pawns = [Pawn('white', (6, i)) for i in range(len(THE_BOARD.positions[6]))]\n black_pawns = [Pawn('black', (1, i)) for i in range(len(THE_BOARD.positions[1]))]\n all_pieces.extend(white_pawns)\n all_pieces.extend(black_pawns)\n\n # Rooks\n rook1 = Rook('black', (0, 0))\n all_pieces.append(rook1)\n rook2 = Rook('black', (0, 7))\n all_pieces.append(rook2)\n rook3 = Rook('white', (7, 0))\n all_pieces.append(rook3)\n rook4 = Rook('white', (7, 7))\n all_pieces.append(rook4)\n\n # Knights\n knight1 = Knight('black', (0, 1))\n all_pieces.append(knight1)\n knight2 = Knight('black', (0, 6))\n all_pieces.append(knight2)\n knight3 = Knight('white', (7, 1))\n all_pieces.append(knight3)\n knight4 = Knight('white', (7, 6))\n all_pieces.append(knight4)\n\n # Bishops\n bishop1 = Bishop('black', (0, 2))\n all_pieces.append(bishop1)\n bishop2 = 
Bishop('black', (0, 5))\n all_pieces.append(bishop2)\n bishop3 = Bishop('white', (7, 2))\n all_pieces.append(bishop3)\n bishop4 = Bishop('white', (7, 5))\n all_pieces.append(bishop4)\n\n # King and Queen\n queen1 = Queen('black', (0, 4))\n all_pieces.append(queen1)\n queen2 = Queen('white', (7, 4))\n all_pieces.append(queen2)\n king1 = King('black', (0, 3))\n all_pieces.append(king1)\n king2 = King('white', (7, 3))\n all_pieces.append(king2)\n\n # Add every single piece to the board. Only then can they update their spaces threatened\n for piece in all_pieces:\n THE_BOARD.update(piece)\n THE_BOARD.update_all_spaces_threatened()", "def advance_board(self):\n raise NotImplementedError", "def handle_game_start(self, color, board):\n\n\n self.color = color\n self.current_board = board\n pass", "def setup_new_board(self):\n\n logger.info(u'setup_new_board()')\n\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n \n self.black_checkers = [ch.Checker(u'black', self) for i in xrange(12)]\n self.white_checkers = [ch.Checker(u'white', self) for i in xrange(12)]\n\n u\"\"\" Place checkers in starting squares \"\"\"\n i = 0\n for row in xrange(3):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.white_checkers[i])\n i += 1\n\n i = 0\n for row in xrange(5, 8):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.black_checkers[i])\n i += 1", "def check_complete_board(start_pos, dim_square, board):\n change = False\n for row in range(8):\n for col in range(8):\n # Grab image on real board\n im = region_grabber((start_pos[0] + col * dim_square[0],\n start_pos[1] - (row + 1.0) * dim_square[1],\n start_pos[0] + (col + 1.0) * dim_square[0],\n start_pos[1] - row * dim_square[1]))\n\n # Check if piece corresponds with piece on board if there is a piece\n if piece_on_pos((row, col), board):\n obj = board[row][col]\n if (row + col) % 2 == 0: # Black background\n pos = imagesearcharea(obj.im_b, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n else: # White background\n pos = imagesearcharea(obj.im_w, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n\n # Else --> Go through every possible image\n if (row + col) % 2 == 0: # Black background\n # Pawn\n pos = imagesearcharea(\"Images/PWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = 
Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n else: # White background\n # Pawn\n pos = imagesearcharea(\"Images/PWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n\n if change:\n pyautogui.moveTo(start_pos[0] + 4 * dim_square[0],\n start_pos[1] - 4 * dim_square[1], 0.2)\n\n return change", "def setup(self):\n piece_order = ['ROOK','KNIGHT','BISHOP','QUEEN','KING','BISHOP','KNIGHT','ROOK']\n for row,colour in zip([0,7],['BLACK','WHITE']):\n for col,piece in enumerate(piece_order):\n self.board[row][col] = colour + '_' + piece\n \n for row,colour in zip([1,6],['BLACK','WHITE']):\n for i in range(8):\n self.board[row][i] = colour + '_' + 'PAWN'\n \n self.toplay = 'WHITE'", "def place_worker(self, cur_board):\n pass", "def start_state():\n return chess.Board()", "def _initiate_board(self):\n grid = []\n for i in range(constant.BOARD_DIMENSION):\n # Starts each row\n current_row = []\n for j in range(constant.BOARD_DIMENSION):\n # Adds the pieces depending on the position\n if i < constant.ROWS_OF_PIECES:\n # Black pieces\n if (j + 
i) % 2 != 0:\n current_row.append(Piece(i, j, Player.black))\n self.num_black_pieces = self.num_black_pieces + 1\n else:\n current_row.append(None)\n\n elif i >= constant.BOARD_DIMENSION - constant.ROWS_OF_PIECES:\n # White pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.white))\n self.num_white_pieces = self.num_white_pieces + 1\n else:\n current_row.append(None)\n\n else:\n current_row.append(None)\n\n grid.append(current_row)\n\n return grid", "def __init__(self, initial_board):\n self.initial_board = initial_board", "def initialize_board(self):\n self.board = np.zeros(shape=(BOARD_SIZE, BOARD_SIZE), dtype=np.int) # another way of defining board: [[for x in range(cm.BOARD_SIZE)] for x in range(cm.BOARD_SIZE)]\n center = int(BOARD_SIZE / 2)\n self.board[center-1][center-1] = self.board[center][center] = WHITE # place the board according to position\n self.board[center][center-1] = self.board[center-1][center] = BLACK\n self.black_piece = 2\n self.white_piece = 2", "def display_board(self, board):\n\n print(\"\\n\\t - A - B - C - D - E - F - G - H - \\n\")\n print(\"\\t8 \", board[56], \"|\", board[57], \"|\", board[58], \"|\", board[59], \"|\", board[60], \"|\", board[61], \"|\", board[62], \"|\", board[63])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t7 \", board[48], \"|\", board[49], \"|\", board[50], \"|\", board[51], \"|\", board[52], \"|\", board[53], \"|\", board[54], \"|\", board[55])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t6 \", board[40], \"|\", board[41], \"|\", board[42], \"|\", board[43], \"|\", board[44], \"|\", board[45], \"|\", board[46], \"|\", board[47])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t5 \", board[32], \"|\", board[33], \"|\", board[34], \"|\", board[35], \"|\", board[36], \"|\", board[37], \"|\", board[38], \"|\", board[39])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t4 \", board[24], \"|\", board[25], \"|\", board[26], \"|\", board[27], \"|\", board[28], \"|\", board[29], \"|\", board[30], \"|\", board[31])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t3 \", board[16], \"|\", board[17], \"|\", board[18], \"|\", board[19], \"|\", board[20], \"|\", board[21], \"|\", board[22], \"|\", board[23])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t2 \", board[8], \"|\", board[9], \"|\", board[10], \"|\", board[11], \"|\", board[12], \"|\", board[13], \"|\", board[14], \"|\", board[15])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t1 \", board[0], \"|\", board[1], \"|\", board[2], \"|\", board[3], \"|\", board[4], \"|\", board[5], \"|\", board[6], \"|\", board[7])\n print(\"\\n\\t - A - B - C - D - E - F - G - H - \\n\")", "def on_new_board(self) -> None:\r\n\r\n self.stop_animation()\r\n\r\n self.board = self.empty_board()\r\n self.anim_board = self.empty_board()\r\n\r\n self.init_new_board()\r\n self.painter.draw_board()", "def __init__(self, board):\n self.running = True\n self.state = \"waiting\"\n pygame.init()\n pygame.display.set_caption(\"Sudoku Solver\")\n\n self.define_grid()\n self.define_number_positions()\n self.define_button()\n self.board = board\n self.font = pygame.font.Font('ubuntu.ttf', NUMBERS_SIZE)\n self.sleep_time = 1 / CHANGES_PER_SECOND\n\n self.original_board = board.copy()", "def resetBoard(self):\n pass", "def set_pieces(self):\n\n for i in range(len(self._game_board)):\n\n # Row 1\n if i == 0:\n for 
ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"black\", \"BCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"black\", \" BH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"black\", \" BE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"black\", \" BA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"black\", \" BG \")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 3\n if i == 2:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"black\", \"BCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 4\n if i == 3:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"black\", \"BSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 7\n if i == 6:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"red\", \"RSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 8\n if i == 7:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"red\", \"RCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 10\n if i == 9:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"red\", \"RCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"red\", \" RH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"red\", \" RE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"red\", \" RA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"red\", \" RG \")\n self._game_board[i][ii].update_location([i, ii])", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def setupBoard(self):\n\t\tfor x in range(8):\n\t\t\tfor y in range(8):\n\t\t\t\tif x % 2 == 0:\n\t\t\t\t\tif y % 2 == 0:\n\t\t\t\t\t\tself.button[x][y] = tk.Button(\n\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\timage=self.BlackSquareImage,\n\t\t\t\t\t\t\tcommand=lambda x=x, y=y: self.recordInput(x, y),\n\t\t\t\t\t\t)\n\t\t\t\t\t\t# self.board[x][y] = Pieces.Piece(\"b\")\n\t\t\t\t\t\tself.button[x][y].grid(row=x, column=y)\n\t\t\t\t\telif y % 2 == 1:\n\t\t\t\t\t\tself.button[x][y] = tk.Button(\n\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\timage=self.WhiteSquareImage,\n\t\t\t\t\t\t\tcommand=lambda x=x, y=y: self.recordInput(x, y),\n\t\t\t\t\t\t)\n\t\t\t\t\t\t# self.board[x][y] = Pieces.Piece(\"w\")\n\t\t\t\t\t\tself.button[x][y].grid(row=x, column=y)\n\t\t\t\tif x % 2 == 1:\n\t\t\t\t\tif y % 2 == 
1:\n\t\t\t\t\t\tself.button[x][y] = tk.Button(\n\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\timage=self.BlackSquareImage,\n\t\t\t\t\t\t\tcommand=lambda x=x, y=y: self.recordInput(x, y),\n\t\t\t\t\t\t)\n\t\t\t\t\t\t# self.board[x][y] = Pieces.Piece(\"b\")\n\t\t\t\t\t\tself.button[x][y].grid(row=x, column=y)\n\t\t\t\t\telif y % 2 == 0:\n\t\t\t\t\t\tself.button[x][y] = tk.Button(\n\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\timage=self.WhiteSquareImage,\n\t\t\t\t\t\t\tcommand=lambda x=x, y=y: self.recordInput(x, y),\n\t\t\t\t\t\t)\n\t\t\t\t\t\t# self.board[x][y] = Pieces.Piece(\"w\")\n\t\t\t\t\t\tself.button[x][y].grid(row=x, column=y)", "def main():\n\n # Initializes all game variables.\n gameOver = False\n winner = False\n gameBoard = emptyBoard()\n\n # Randomly fills two tiles to start with.\n for i in range(0, 2):\n gameBoard = fillEmpty(gameBoard)\n\n # Debugging/testing out different cases.\n # gameBoard = [[0, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\n\n # gameBoard[0][0] = 1024\n # gameBoard[1][0] = 1024\n\n # gameBoard[0] = [2, 2, 2, 2]\n \n # gameBoard[0][0] = 2\n # gameBoard[1][0] = 2\n # gameBoard[2][0] = 4\n # gameBoard[3][0] = 4\n\n # Runs the game loop.\n while not gameOver:\n # Sets the frame rate and launches the game in its default state.\n CLOCK.tick(FPS)\n move = ''\n change = False\n displayBoard(gameBoard)\n\n # Read the player's button inputs.\n for event in pygame.event.get():\n if(event.type == pygame.KEYDOWN) & (move == ''):\n if(event.key == pygame.K_UP):\n move = 'u'\n if(event.key == pygame.K_DOWN):\n move = 'd'\n if(event.key == pygame.K_LEFT):\n move = 'l'\n if(event.key == pygame.K_RIGHT):\n move = 'r'\n if(event.type == pygame.QUIT):\n pygame.quit()\n return\n\n if(move == 'r'):\n # Initial values of the board before movement.\n oldBoard = emptyBoard()\n\n for i in range(0, 4):\n for j in range(0, 4):\n oldBoard[i][j] = gameBoard[i][j]\n\n # Shifts all tiles to the right.\n for i in range(0, 4):\n newRow = []\n empty = 0\n\n for col in gameBoard[i]:\n if col > 0:\n newRow.append(col)\n empty += 1\n\n for n in range(0, 4 - empty):\n newRow.insert(0, 0)\n\n for n in range(0, 4):\n gameBoard[i][n] = newRow[n]\n\n if(newRow[0] == newRow[1] == newRow[2] == newRow[3]):\n doubCheck = True\n checkType = False\n elif(newRow[0] == newRow[1]) & (newRow[2] == newRow[3]):\n doubCheck = True\n checkType = True\n else:\n doubCheck = False\n\n for n in range(2, -1, -1):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n+1] *= 2\n gameBoard[i][n] = 0\n elif(gameBoard[i][n+1] == 0):\n gameBoard[i][n+1] = gameBoard[i][n]\n gameBoard[i][n] = 0\n \n if doubCheck:\n if checkType:\n for n in range(0, 3):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n+1] *= 2\n gameBoard[i][n] = 0\n else:\n for n in range(2, -1, -1):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n+1] *= 2\n gameBoard[i][n] = 0\n elif(gameBoard[i][n+1] == 0):\n gameBoard[i][n+1] = gameBoard[i][n]\n gameBoard[i][n] = 0\n\n\n # Checks for changes pre- and post-movement\n change = gameBoard != oldBoard\n elif(move == 'l'):\n # Initial values of the board before movement.\n oldBoard = emptyBoard()\n\n for i in range(0, 4):\n for j in range(0, 4):\n oldBoard[i][j] = gameBoard[i][j]\n\n # Shifts all tiles to the left.\n for i in range(0, 4):\n newRow = []\n empty = 0\n\n for col in gameBoard[i]:\n if col > 0:\n newRow.append(col)\n empty += 1\n\n for n in range(0, 4 - empty):\n newRow.append(0)\n\n for n in range(0, 4):\n gameBoard[i][n] = newRow[n]\n\n if(newRow[0] == newRow[1] == 
newRow[2] == newRow[3]):\n doubCheck = True\n checkType = False\n elif(newRow[0] == newRow[1]) & (newRow[2] == newRow[3]):\n doubCheck = True\n checkType = True\n else:\n doubCheck = False\n\n for n in range(0, 3):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n] *= 2\n gameBoard[i][n+1] = 0\n elif(gameBoard[i][n] == 0):\n gameBoard[i][n] = gameBoard[i][n+1]\n gameBoard[i][n+1] = 0\n\n if doubCheck:\n if checkType:\n for n in range(3, 0, -1):\n if(gameBoard[i][n] == gameBoard[i][n-1]):\n gameBoard[i][n-1] *= 2\n gameBoard[i][n] = 0\n else:\n for n in range(0, 3):\n if(gameBoard[i][n] == gameBoard[i][n+1]):\n gameBoard[i][n] *= 2\n gameBoard[i][n+1] = 0\n elif(gameBoard[i][n] == 0):\n gameBoard[i][n] = gameBoard[i][n+1]\n gameBoard[i][n+1] = 0\n\n\n # Checks for changes pre- and post-movement\n change = gameBoard != oldBoard\n elif(move == 'd'):\n # Initial values of the board before movement.\n oldBoard = emptyBoard()\n\n for i in range(0, 4):\n for j in range(0, 4):\n oldBoard[i][j] = gameBoard[i][j]\n\n # Shifts all tiles downward.\n for i in range(0, 4):\n newCol = []\n empty = 0\n\n for j in range(0, 4):\n if gameBoard[j][i] > 0:\n newCol.append(gameBoard[j][i])\n empty += 1\n\n for n in range(0, 4 - empty):\n newCol.insert(0, 0)\n\n for n in range(0, 4):\n gameBoard[n][i] = newCol[n]\n\n if(newCol[0] == newCol[1] == newCol[2] == newCol[3]):\n doubCheck = True\n checkType = False\n elif(newCol[0] == newCol[1]) & (newCol[2] == newCol[3]):\n doubCheck = True\n checkType = True\n else:\n doubCheck = False\n\n for n in range(2, -1, -1):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n+1][i] *= 2\n gameBoard[n][i] = 0\n elif(gameBoard[n+1][i] == 0):\n gameBoard[n+1][i] = gameBoard[n][i]\n gameBoard[n][i] = 0\n\n if doubCheck:\n if checkType:\n for n in range(0, 3):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n+1][i] *= 2\n gameBoard[n][i] = 0\n else:\n for n in range(2, -1, -1):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n+1][i] *= 2\n gameBoard[n][i] = 0\n elif(gameBoard[n+1][i] == 0):\n gameBoard[n+1][i] = gameBoard[n][i]\n gameBoard[n][i] = 0\n \n # Checks for changes pre- and post-movement\n change = gameBoard != oldBoard\n elif(move == 'u'):\n # Initial values of the board before movement.\n oldBoard = emptyBoard()\n\n for i in range(0, 4):\n for j in range(0, 4):\n oldBoard[i][j] = gameBoard[i][j]\n\n # Shifts all tiles upward.\n for i in range(0, 4):\n newCol = []\n empty = 0\n\n for j in range(0, 4):\n if gameBoard[j][i] > 0:\n newCol.append(gameBoard[j][i])\n empty += 1\n\n for n in range(0, 4 - empty):\n newCol.append(0)\n\n for n in range(0, 4):\n gameBoard[n][i] = newCol[n]\n\n\n if(newCol[0] == newCol[1] == newCol[2] == newCol[3]):\n doubCheck = True\n checkType = False\n elif(newCol[0] == newCol[1]) & (newCol[2] == newCol[3]):\n doubCheck = True\n checkType = True\n else:\n doubCheck = False\n\n for n in range(0, 3):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n][i] *= 2\n gameBoard[n+1][i] = 0\n elif(gameBoard[n][i] == 0):\n gameBoard[n][i] = gameBoard[n+1][i]\n gameBoard[n+1][i] = 0\n\n if doubCheck:\n if checkType:\n for n in range(3, 0, -1):\n if(gameBoard[n][i] == gameBoard[n-1][i]):\n gameBoard[n-1][i] *= 2\n gameBoard[n][i] = 0\n else:\n for n in range(0, 3):\n if(gameBoard[n][i] == gameBoard[n+1][i]):\n gameBoard[n][i] *= 2\n gameBoard[n+1][i] = 0\n elif(gameBoard[n][i] == 0):\n gameBoard[n][i] = gameBoard[n+1][i]\n gameBoard[n+1][i] = 0\n\n # Checks for changes pre- and post-movement\n change = gameBoard != 
oldBoard \n\n # Checks if the player won the game.\n winner = checkWon(gameBoard)\n # Checks if the game is over before the next iteration of the game loop.\n gameOver = checkGameOver(gameBoard)\n\n # Fills the game board if the game is continued and the board has shifted.\n if(not (gameOver | winner)):\n if(change) & (move != ''):\n gameBoard = fillEmpty(gameBoard)\n else:\n # Displays the game board's final state.\n displayBoard(gameBoard)\n break\n \n # Loads the final message to the player.\n pygame.time.delay(10)\n font = pygame.font.SysFont('comic sans ms', 64)\n whatNow = False\n\n while not whatNow:\n if(gameOver):\n message = \"Game Over!\"\n t_color = SIXTEEN\n b_color = BLACK\n elif(winner):\n message = \"You won!\"\n t_color = BLACK\n b_color = GREEN\n\n # Displays the loaded message to screen.\n messageOutline = pygame.Rect(0, H//2 - 64, W, 128)\n endMessage = font.render(message, 0, t_color)\n pygame.draw.rect(WINDOW, b_color, messageOutline)\n WINDOW.blit(endMessage, (512//2 - len(message)*17, 512//2 - 48))\n pygame.display.update()\n\n # Lets the player choose to continue or quit after the game results.\n for event in pygame.event.get():\n if(event.type == pygame.KEYDOWN):\n whatNow = True\n if(event.type == pygame.QUIT):\n pygame.quit()\n return\n\n # Restart game if the player doesn't choose to quit.\n main()", "def advance_board(self):\n board = self.board\n rules = self.energy_rules\n h, w = board.shape\n beta = 1.0 / max(1e-20, self.temperature)\n if len(rules[0]) - 1 == 4:\n neighborhood = np.array([[0,1,0],[1,0,1],[0,1,0]])\n elif len(rules[0]) - 1 == 6:\n neighborhood = np.array([[0,1,1],[1,0,1],[1,1,0]])\n elif len(rules[0]) - 1 == 8:\n neighborhood = np.array([[1,1,1],[1,0,1],[1,1,1]])\n else:\n raise RuntimeError(\"async rules must have length 5, 7, or 9\")\n rng = get_rng()\n for _ in range(int(board.size * self.cells_per_update)):\n x = rng.choice(w)\n y = rng.choice(h)\n if board[y, x] & CellTypes.frozen:\n continue\n neighbors = board.view(wrapping_array)[y-1:y+2, x-1:x+2] * neighborhood\n alive_neighbors = np.sum(neighbors & CellTypes.alive > 0)\n spawn_neighbors = np.sum(neighbors & CellTypes.spawning > 0)\n frozen = np.sum(neighbors & CellTypes.freezing) > 0\n if frozen:\n continue\n if board[y, x] & CellTypes.alive:\n H = rules[0][alive_neighbors]\n else:\n H = rules[1][alive_neighbors]\n\n P = 0.5 + 0.5*np.tanh(H * beta)\n P = 1 - (1-P)*(1-self.spawn_prob)**spawn_neighbors\n board[y, x] = CellTypes.life if coinflip(P) else CellTypes.empty", "def initial_board():\n board = [OUTER] * 100\n for i in Othello.squares():\n board[i] = EMPTY\n # The middle four squares should hold the initial piece positions.\n board[44], board[45] = BLACK, WHITE\n board[54], board[55] = WHITE, BLACK\n return board", "def main():\n \n is_white_move = True\n board = initial_state()\n print_board(board)\n \n while True:\n if is_white_move == True:\n print()\n result = str(input(\"White's move: \"))\n else:\n print()\n result = str(input(\"Black's move: \"))\n\n if result == 'h' or result == 'H':\n print(HELP_MESSAGE)\n print_board(board)\n elif result == 'q' or result == 'Q':\n confirm_quit = str(input(\"Are you sure you want to quit? 
\"))\n if confirm_quit == 'y' or confirm_quit == \"Y\":\n break\n else:\n print_board(board) \n\n else:\n if valid_move_format(result) == False:\n print('Invalid move')\n print()\n print_board(board)\n else:\n move = process_move(result)\n if is_move_valid(move, board, is_white_move): \n board = update_board(board, move)\n print_board(board)\n is_white_move = not is_white_move\n if check_game_over(board, is_white_move):\n break\n else:\n print('Invalid move')\n print()\n print_board(board)", "def visualize_board():\n count = 0\n cars = {}\n cmap = colors.ListedColormap(['white','purple', 'brown', 'red', 'pink', 'black', 'beige', 'yellow', 'turquoise', 'coral', 'grey', 'navy', 'indigo', 'cyan', 'olive', 'maroon', 'silver', 'lime', 'teal', 'tan', 'aquamarine', 'violet', 'magenta', 'chartreuse', 'azure', 'gold', 'plum', 'ivory'])\n \n # Creates board object\n board = Board(\"data/Rushhour6x6_new_19_steps_15_cars.csv\")\n print(board)\n \n # Creates begin state\n my_board = np.zeros((board.length, board.length))\n my_board = create_init(board, my_board, cars)\n \n # # Creates state created by each step\n # for step in steps:\n # for car in board.cars.values():\n # if car.name == step[0]:\n # request_car = car\n # board.move(request_car, step[1])\n my_board = create_numpy(board, my_board, cars)\n #\n # Visualizes numpy list\n fig = plt.gcf()\n im = plt.imshow(my_board, cmap=cmap, animated=True)\n # Saves visualisation\n plt.savefig(fname=f'screenshots/creator/{count}Rush_hour6x6', dpi=150)\n\n count += 1", "def _board(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.send_line('STATUS BOARD %s' % repr(self.game))", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def get_board(self):\n pass", "def __init__(self, python_board: list[list[int]] = None, red_active: bool = True) -> None:\n\n game_board = [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n\n if python_board is not None:\n self.board_array = np.array(python_board)\n else:\n self.board_array = np.array(game_board)\n\n self.move_number = 0\n\n # Creating the kernels to use in a 2d convolution to check the board for a winner later\n across = np.array([[1, 1, 1, 1]])\n vertical = np.transpose(across)\n main_diagonal = np.eye(4, dtype=np.uint8)\n off_diagonal = np.fliplr(main_diagonal)\n self._detection_kernels_red = [across, vertical, main_diagonal, off_diagonal]\n self._detection_kernels_yellow = [kernel * -1 for kernel in self._detection_kernels_red]\n\n self._is_red_active = red_active\n\n # Matches moves to their indices in self._valid_moves, this order is very important\n # for optimising alpha-beta pruning\n self._valid_move_order = {3: 0, 2: 1, 4: 2, 5: 3, 1: 4, 0: 5, 6: 6}\n self._valid_moves = [3, 2, 4, 5, 1, 0, 6]\n self._column_to_row = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n\n self._win_state = None\n\n # This code reads in the hash keys for use in Zobrist hashing, for more information, see\n # opening_book_gen.py\n red_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_red_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n red_hash_keys.append([int(r) for r in row])\n self._red_hash_keys = np.array(red_hash_keys)\n\n yellow_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_yellow_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n 
yellow_hash_keys.append([int(r) for r in row])\n self._yellow_hash_keys = np.array(yellow_hash_keys)\n\n self.hash = 0", "def __iterate(self):\n\t\tnext_board = []\n\n\t\tfor y, row in enumerate(self.__board):\n\t\t\tnext_board.append([])\n\n\t\t\tfor x, cell in enumerate(row):\n\t\t\t\tneighbors = [\n\t\t\t\t\tself.__get_cell_state(y - 1, x - 1),\n\t\t\t\t\tself.__get_cell_state(y - 1, x),\n\t\t\t\t\tself.__get_cell_state(y - 1, x + 1),\n\t\t\t\t\tself.__get_cell_state(y, x - 1),\n\t\t\t\t\tself.__get_cell_state(y, x + 1),\n\t\t\t\t\tself.__get_cell_state(y + 1, x - 1),\n\t\t\t\t\tself.__get_cell_state(y + 1, x),\n\t\t\t\t\tself.__get_cell_state(y + 1, x + 1)\n\t\t\t\t]\n\t\t\t\tnum_neighbors = sum(neighbors)\n\t\t\t\tstate = get_new_state(cell, num_neighbors)\n\t\t\t\tnext_board[y].append(state)\n\n\t\tself.__board = next_board\n\t\tself.__display(self.__board)", "def _update_board(self):\n\n self.game_board.update_board(self.tetrino_set)", "def run_game(self, board):\n run_program = True\n\n while run_program:\n # eventlistener for mouse events\n for event in pygame.event.get():\n if pygame.mouse.get_pressed() and event.type == pygame.MOUSEBUTTONDOWN:\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Get position of mouse.\n (x, y) = pygame.mouse.get_pos()\n\n # Set circle position in the middle of the grid_square.\n draw_x = x - (x % self.square_size) + self.square_mid\n\n # Calculation to get xPosition from selected Mouse xPosition.\n x = x // 80\n\n # Check if column is full before placing. Break out if that's the case.\n if self.check_if_column_full(board, x):\n break\n\n # Calculate the yPosition, where the chip should be placed with various helper methods.\n draw_y = self.height - (self.square_size * self.draw_dict_mapping[self.get_y_pos(board, x)]) + 40\n\n # Check, which players turn it is.\n if self.playerOne:\n # Player Ones turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 1\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 1):\n run_program = False\n self.switch_player()\n else:\n # Player Twos turn.\n pos = (x, self.get_y_pos(board, x))\n if board[pos] == 0:\n board[pos] = 2\n self.draw_circle(draw_x, draw_y, self.playerOne)\n self.screen.blit(self.background, (0, 0))\n if self.check_if_user_won(board, pos, 2):\n run_program = False\n self.switch_player()\n\n if event.type == pygame.KEYDOWN:\n # End the game with escape.\n if event.key == pygame.K_ESCAPE:\n self.draw = True\n run_program = False\n\n # End the Program with the X in the upper right corner.\n elif event.type == pygame.QUIT:\n self.draw = True\n run_program = False\n\n pygame.display.flip()\n self.game_over(self.playerOne, self.draw)\n # wait for given time and end the game\n pygame.time.wait(5000)\n pygame.quit()", "def printBoard(self):", "def step(self):\n\t\tnewBoard = CellArray(self.size)\n\t\tfor i in range(0, self.size, 1):\n\t\t\tfor j in range(0, self.size, 1):\n\t\t\t\tnewBoard.board[i][j] = self.changeCell(i, j)\n\t\tself.board = newBoard.board", "def populate_board(self):\n for key, value in self.game.white_pieces.items():\n x_pos = self.width * value.x_pos\n y_pos = self.width * value.y_pos\n img = self.load_image(\"images/\" + value.image, value.starting_position)\n self.place_image_on_canvas(x_pos, y_pos, img, \"images/\" + value.image, value.starting_position)\n for key, value in self.game.black_pieces.items():\n x_pos = self.width * value.x_pos\n y_pos = self.width * value.y_pos\n 
img = self.load_image(\"images/\" + value.image, value.starting_position)\n self.place_image_on_canvas(x_pos, y_pos, img, \"images/\" + value.image, value.starting_position)", "def computerTurn(board):\n\n i, j = bestMove(board)\n\n board[i][j] = computer\n pygame.time.delay(500)\n updateWindow(i, j, computer)", "def __init__(self, board=None):\n self.winner = None\n self.board = board or [self.__class__.EMPTY_POSITION_COUNTER] * 9", "def setup_board(self):\n\n for row in range(10):\n\n row_list = list()\n\n for column in range(9):\n\n row_list.append(None)\n\n self._board.append(row_list)", "def run(self):\r\n \r\n if not self.gameOver:\r\n screen.fill(COLOR3)\r\n self.board.drawBoard()\r\n self.handleEvents()\r\n for piece in self.board.pieces.values():\r\n piece.update()\r\n else:\r\n self.resetGame()\r\n pygame.display.update()", "def reset(self):\n # replace with your code\n self.board = [[0 for dummy_index in range(self.grid_width)] for dummy_inner_index in range(self.grid_height)]", "def _print_board(board):\r\n pass", "def printBoard(self):\n if self.side == self.WHITE or self.side == None:\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r) # print a8 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n else:\n for r in [1,2,3,4,5,6,7,8]:\n for c in 'hgfedcba':\n p = self.getPiece(c,r) # print h1 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r)\n #if p != None and p.header.frame_id == \"chess_board\":\n # print \"Warning, frame is chess_board:\", c+str(r)", "def print_final_board(self, board):\n\t\tinterpreted = self.interpret_board(board)\n\t\tself.print_board(interpreted)", "def __init__(self, board):\n self.board = board", "def __init__(self):\n self.board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.last_move = None", "def __init__(self, num_rows = 4, num_cols = 4,\n first_mover = \"W\", top_left = \"B\",\n how_to_win = \">\", initial_config=[]):\n # initial_config was made for AI Othello to\n # get around pass-by-reference behavior of lists.\n if (4 > num_rows > 16) or num_rows % 2 != 0:\n raise Exception\n else:\n self._num_rows = num_rows\n if (4 > num_cols > 16) or num_cols % 2 != 0:\n raise Exception\n else:\n self._num_cols = num_cols\n if first_mover != \"B\" and first_mover != \"W\":\n raise Exception\n else:\n self._turn = first_mover\n if top_left != \"B\" and top_left != \"W\":\n raise Exception\n else:\n self._top_left = top_left\n if how_to_win != \">\" and how_to_win != \"<\":\n raise Exception\n else:\n self._how_to_win = how_to_win\n\n if initial_config == []:\n self._board = self._make_board(num_rows, num_cols, top_left)\n else:\n self._board = deepcopy(initial_config)\n \n self._game_over = False\n self._winner = \" \"\n self._tl_cell = (0, 0)\n self._tr_cell = (0, num_cols-1)\n self._bl_cell = (num_rows-1, 0)\n self._br_cell = (num_rows-1, num_cols-1)\n self._ls_cells = [(c, 0) for c in range(1, num_rows-1)]\n self._rs_cells = [(c, num_cols-1) for c in range(1, num_rows-1)]\n self._ts_cells = [(0, c) for c in range(1, num_cols-1)]\n self._bs_cells = [(num_rows-1, c) for c in range(1, num_cols-1)]\n #^Note how ranges start from 1 and go to num_rows-1 to avoid corners,\n #which are processed differently", "def reset(self, board):", "def __init__(self):\n self._current_state = \"UNFINISHED\"\n self._start_color = \"RED\"\n self._board = Board()", "def initial_state() -> 
Board:\n board = (\"rnbqkbnr\", \"pppppppp\", \"........\", \"........\", \"........\",\n \"........\", \"PPPPPPPP\", \"RNBQKBNR\")\n\n return board", "def __init__(self):\n self._board = []\n for i in range(10):\n self._board.append([None for i in range(9)])\n self.place_pieces()", "def update_board(self, mpos):\n pass", "def update(self, board):\n for row in range(8):\n for col in range(8):\n if board[row, col] == -1:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[0])\n elif board[row, col] == -2:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[2])\n elif board[row, col] == 0:\n self.circles[row][col].undraw()\n self.pieces[row][col].setFill(self.frame_colors[(row+col)%2])\n elif board[row, col] == 1:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[1])\n elif board[row, col] == 2:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[3])", "def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))", "def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))", "def __init__(self):\n # The starting counts are set to 0 and modified when the board is initiated.\n self.num_black_pieces = 0\n self.num_black_kings = 0\n self.num_white_pieces = 0\n self.num_white_kings = 0\n # Creates a new board and fills it with the appropriate pieces.\n self.board = self._initiate_board()\n self.moves = []", "def __init__(self, size, board):\n self.BoardSize = size #the size of the board\n self.CurrentGameBoard= board #the current state of the game board", "def print_current_board(self):\n\n # iterate through the range in reverse order\n for r in range(9, -2, -1):\n output = \"\"\n if r == 9 or r == 0:\n # then the top or bottom of the board\n output = \" +------------------------+\"\n elif r == -1:\n # then show the ranks\n output = \" a b c d e f g h\"\n else: # board\n output = \" \" + str(r) + \" |\"\n # fill in all the files with pieces at the current rank\n for file_offset in range(0, 8):\n # start at a, with with file offset increasing the char\n f = chr(ord(\"a\") + file_offset)\n current_piece = None\n for piece in self.game.pieces:\n if piece.file == f and piece.rank == r:\n # then we found the piece at (file, rank)\n current_piece = piece\n break\n\n code = \".\" # default \"no piece\"\n if current_piece:\n # the code will be the first character of their type\n # e.g. 
'Q' for \"Queen\"\n code = current_piece.type[0]\n\n if current_piece.type == \"Knight\":\n # 'K' is for \"King\", we use 'N' for \"Knights\"\n code = \"N\"\n\n if current_piece.owner.id == \"1\":\n # the second player (black) is lower case.\n # Otherwise it's uppercase already\n code = code.lower()\n\n output += \" \" + code + \" \"\n\n output += \"|\"\n print(output)", "def init_board(self) -> None:\n\t\tself.canvas.create_rectangle(0, 0, self.canvas_width, self.canvas_height, fill=self.color_background)\n\t\tfor x in range(0, self.canvas_width, self.canvas_width//self.board_size):\n\t\t\tself.canvas.create_line(x, 0, x, self.canvas_width, fill=self.color_tile_border)\n\n\t\tfor y in range(0, self.canvas_width+1, self.canvas_height//self.board_size):\n\t\t\tself.canvas.create_line(0, y, self.canvas_height, y, fill=self.color_tile_border)\n\n\t\tself.text_area.delete('0.1', '2.1')", "def play(self):\n board = Board()\n print(\"Let's play tic-tac-toe against computer!\")\n print(\"Here is your board!\")\n count = 1\n print(board)\n while True:\n board.person_move()\n status = board.get_status()\n if status == 'x' or status == '0':\n return(f\"Winner is {status}\")\n elif status == 'draw':\n return(\"Friendship won!\")\n board.make_computer_move()\n status = board.get_status()\n if status == 'x' or status == '0':\n return(f\"Winner is {status}\")\n elif status == 'draw':\n return(\"Friendship won!\")\n print(f\"Board after {count} action.\")\n count += 1\n print(board)", "def start(self):\n # store a sign controlling addition or subtraction so pieces move in the right direction\n self.board = fen_to_position(self.game.fen)\n self.transposition_table = dict()", "def start():\n boards = [Board(board_size, number_of_game_pieces, 1), Board(board_size, number_of_game_pieces, 2)]\n gameover = False\n quitgame = False\n i = 1\n while not gameover:\n coords_accepted = False\n while not coords_accepted:\n inp = input(\n f\"Player {boards[(i + 1) % 2].player_id}, what is the coordinate you're targeting (row,column,layer)?\")\n if inp == \"show\":\n print(boards[(i + 1) % 2])\n continue\n elif inp == \"quit\":\n quitgame = True\n break\n elif boards[i].test_coords_valid(inp):\n coords_accepted = True\n else:\n print(\"Invalid coordinates. 
\")\n if quitgame:\n print(\"Quitting game\")\n break\n x, y, z = eval(inp)\n gameover = boards[i].strike(x, y, z)\n if gameover:\n print(f\"Game over, player #{boards[(i + 1) % 2].player_id} won!\")\n i = (i + 1) % 2", "def new_game(self):\n self.board = [None] * 9\n self.player = \"X\"\n self.winner = None", "def main_board_maintenance(self,x_cor,y_cor):\r\n\t\r\n\t\tfor event in pygame.event.get(): \r\n\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\t\t\t\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t#print(x_adjusted/80,y_adjusted/80)\r\n\r\n\t\t\t\tif self.selected_from_selection_bar :\r\n\t\t\t\t\t#print('inside selection bar selection option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\ttemp_game_state = CP.game_data()\r\n\t\t\t\t\ttemp_game_state = copy.deepcopy(self.game_state)\r\n\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,Helping_Class.selection_bar_reverse_mapping[self.selected_piece] ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\ttemp_game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\ttemp_game_state.active_color = not temp_game_state.active_color\r\n\t\t\t\t\tfen = temp_game_state.generate_fen()\r\n\t\t\t\t\tboard2 = chess.Board(fen=fen)\r\n\t\t\t\t\tprint(board2)\r\n\t\t\t\t\tprint(fen)\r\n\t\t\t\t\tprint('board2.is_check()',board2.is_check())\r\n\t\t\t\t\t\r\n\t\t\t\t\t#now we need to place the piece on board\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)] == None:\r\n\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\tif not board2.is_check():\r\n\t\t\t\t\t\t\tif self._check_valid_position_(x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\t\tself.place_piece_on_board_from_selection_bar(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\t#rajan's\r\n\t\t\t\t\t\t\t\t#print(self.selected_piece)\r\n\t\t\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,self.selected_position ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\t\t\t\tself.game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\t\t\t\tself.selected_piece = None\r\n\t\t\t\t\t\t\t\tself.selected_position = None\r\n\r\n\t\t\t\t\t\t\t\tself.computer_turn =True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t#board position is filled then nothing to do\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#if his piece change selection\r\n\t\t\t\t\t\tself.selected_from_selection_bar =False\r\n\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\r\n\r\n\t\t\t\telif self.selected_from_board:\r\n\t\t\t\t\t#print('inside selection bar board option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\t\r\n\t\t\t\t\tomega = True\r\n\t\t\t\t\tif self.selected_position:\r\n\t\t\t\t\t\tif self.selected_position == (x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\tomega = False\r\n\t\t\t\t\t#print(self.selected_position,(x_adjusted,y_adjusted))\r\n\t\t\t\t\tif 
omega:\r\n\t\t\t\t\t\tmove = self._check_valid_move_(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\tprint(move)\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tif move:\r\n\t\t\t\t\t\t\tself.computer_turn = True\r\n\t\t\t\t\t\t\t#if move contains x then we have update state of captured piece\r\n\t\t\t\t\t\t\t#else just update selected piece\r\n\t\t\t\t\t\t\t#print(\"correct move\")\r\n\t\t\t\t\t\t\tself.capture_piece_update_board_or_place_piece(move,x_adjusted,y_adjusted)\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\t\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\tif self.whose_move == 'white':\r\n\t\t\t\t\t\t\tif 'W' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telif self.whose_move == 'black':\r\n\t\t\t\t\t\t\tif 'B' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#it is none means nothing is their so nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\r\n\t\t\t\r\n\r\n\t\t\telse:\r\n\t\t\t\t#print(\"not_pressed\")\r\n\t\t\t\tpass", "def evaluate(self, board):", "def update(self, board):\n self.update_border()\n self.update_score_and_level(board)\n self.update_next_piece(board)\n\n self.update_settled_pieces(board)\n\n self.update_falling_piece(board)\n self.update_shadow(board)\n\n self.refresh_screen()", "def __init__(self):\n\n self.__turn_info = { 'turn': ChessGame.WHITE }\n self.init_board()", "def set_game_params(self, board):\n self.board = board\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 1:\n self.pos = (i, j)\n break\n self.max_fruit_turn = min(len(board), len(board[0]))", "def __init__(self):\n self.game_board = [' '] * 9\n self.size = len(self.game_board)\n self.move = 'X'\n self.player1 = None\n self.player2 = None\n self.current_player = None\n self.board_coords = {\n (1, 3): 0, (2, 3): 1, (3, 3): 2,\n (1, 2): 3, (2, 2): 4, (3, 2): 5,\n (1, 1): 6, (2, 1): 7, (3, 1): 8\n }\n\n self.winning_cases = [\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n ]", "def 
__init__(self):\n self.board = [\n BS, BS, BS, BS,\n BS, BS, BS,\n BS, BS, BS, BS,\n EM, EM, EM,\n WS, WS, WS, WS,\n WS, WS, WS,\n WS, WS, WS, WS\n ]\n self.curr_player = WHITE_PLAYER", "def visualisation(board):\n\n\n if board.width == 6:\n\n print(board.coordinates[30][1], \" \", board.coordinates[31][1], \" \", board.coordinates[32][1], \" \", board.coordinates[33][1], \" \", board.coordinates[34][1], \" \", board.coordinates[35][1])\n print(\" \")\n print(board.coordinates[24][1], \" \", board.coordinates[25][1], \" \", board.coordinates[26][1], \" \", board.coordinates[27][1], \" \", board.coordinates[28][1], \" \", board.coordinates[29][1])\n print(\" \")\n print(board.coordinates[18][1], \" \", board.coordinates[19][1], \" \", board.coordinates[20][1], \" \", board.coordinates[21][1], \" \", board.coordinates[22][1], \" \", board.coordinates[23][1])\n print(\" \")\n print(board.coordinates[12][1], \" \", board.coordinates[13][1], \" \", board.coordinates[14][1], \" \", board.coordinates[15][1], \" \", board.coordinates[16][1], \" \", board.coordinates[17][1])\n print(\" \")\n print(board.coordinates[6][1], \" \", board.coordinates[7][1], \" \", board.coordinates[8][1], \" \", board.coordinates[9][1], \" \", board.coordinates[10][1], \" \", board.coordinates[11][1])\n print(\" \")\n print(board.coordinates[0][1], \" \", board.coordinates[1][1], \" \", board.coordinates[2][1], \" \", board.coordinates[3][1], \" \", board.coordinates[4][1], \" \", board.coordinates[5][1])\n print(\" \")\n\n if board.width == 9:\n\n print(board.coordinates[72][1], \" \", board.coordinates[73][1], \" \", board.coordinates[74][1], \" \", board.coordinates[75][1], \" \", board.coordinates[76][1], \" \", board.coordinates[77][1], \" \", board.coordinates[78][1], \" \", board.coordinates[79][1], \" \", board.coordinates[80][1])\n print(\" \")\n print(board.coordinates[63][1], \" \", board.coordinates[64][1], \" \", board.coordinates[65][1], \" \", board.coordinates[66][1], \" \", board.coordinates[67][1], \" \", board.coordinates[68][1], \" \", board.coordinates[69][1], \" \", board.coordinates[70][1], \" \", board.coordinates[71][1])\n print(\" \")\n print(board.coordinates[54][1], \" \", board.coordinates[55][1], \" \", board.coordinates[56][1], \" \", board.coordinates[57][1], \" \", board.coordinates[58][1], \" \", board.coordinates[59][1], \" \", board.coordinates[60][1], \" \", board.coordinates[61][1], \" \", board.coordinates[62][1])\n print(\" \")\n print(board.coordinates[45][1], \" \", board.coordinates[46][1], \" \", board.coordinates[47][1], \" \", board.coordinates[48][1], \" \", board.coordinates[49][1], \" \", board.coordinates[50][1], \" \", board.coordinates[51][1], \" \", board.coordinates[52][1], \" \", board.coordinates[53][1])\n print(\" \")\n print(board.coordinates[36][1], \" \", board.coordinates[37][1], \" \", board.coordinates[38][1], \" \", board.coordinates[39][1], \" \", board.coordinates[40][1], \" \", board.coordinates[41][1], \" \", board.coordinates[42][1], \" \", board.coordinates[43][1], \" \", board.coordinates[44][1])\n print(\" \")\n print(board.coordinates[27][1], \" \", board.coordinates[28][1], \" \", board.coordinates[29][1], \" \", board.coordinates[30][1], \" \", board.coordinates[31][1], \" \", board.coordinates[32][1], \" \", board.coordinates[33][1], \" \", board.coordinates[34][1], \" \", board.coordinates[35][1])\n print(\" \")\n print(board.coordinates[18][1], \" \", board.coordinates[19][1], \" \", board.coordinates[20][1], \" \", board.coordinates[21][1], 
\" \", board.coordinates[22][1], \" \", board.coordinates[23][1], \" \", board.coordinates[24][1], \" \", board.coordinates[25][1], \" \", board.coordinates[26][1])\n print(\" \")\n print(board.coordinates[9][1], \" \", board.coordinates[10][1], \" \", board.coordinates[11][1], \" \", board.coordinates[12][1], \" \", board.coordinates[13][1], \" \", board.coordinates[14][1], \" \", board.coordinates[15][1], \" \", board.coordinates[16][1], \" \", board.coordinates[17][1])\n print(\" \")\n print(board.coordinates[0][1], \" \", board.coordinates[1][1], \" \", board.coordinates[2][1], \" \", board.coordinates[3][1], \" \", board.coordinates[4][1], \" \", board.coordinates[5][1], \" \", board.coordinates[6][1], \" \", board.coordinates[7][1], \" \", board.coordinates[8][1])\n print(\" \")\n\n\n if board.width == 12:\n print(board.coordinates[132][1], \" \", board.coordinates[133][1], \" \", board.coordinates[134][1], \" \", board.coordinates[135][1], \" \", board.coordinates[136][1], \" \", board.coordinates[137][1], \" \", board.coordinates[138][1], \" \", board.coordinates[139][1], \" \", board.coordinates[140][1], \" \", board.coordinates[141][1], \" \", board.coordinates[142][1], \" \", board.coordinates[143][1], \" \", )\n print(\" \")\n print(board.coordinates[120][1], \" \", board.coordinates[121][1], \" \", board.coordinates[122][1], \" \", board.coordinates[123][1], \" \", board.coordinates[124][1], \" \", board.coordinates[125][1], \" \", board.coordinates[126][1], \" \", board.coordinates[127][1], \" \", board.coordinates[128][1], \" \", board.coordinates[129][1], \" \", board.coordinates[130][1], \" \", board.coordinates[131][1], \" \", )\n print(\" \")\n print(board.coordinates[108][1], \" \", board.coordinates[109][1], \" \", board.coordinates[110][1], \" \", board.coordinates[111][1], \" \", board.coordinates[112][1], \" \", board.coordinates[113][1], \" \", board.coordinates[114][1], \" \", board.coordinates[115][1], \" \", board.coordinates[116][1], \" \", board.coordinates[117][1], \" \", board.coordinates[118][1], \" \", board.coordinates[119][1], \" \", )\n print(\" \")\n print(board.coordinates[96][1], \" \", board.coordinates[97][1], \" \", board.coordinates[98][1], \" \", board.coordinates[99][1], \" \", board.coordinates[100][1], \" \", board.coordinates[101][1], \" \", board.coordinates[102][1], \" \", board.coordinates[103][1], \" \", board.coordinates[104][1], \" \", board.coordinates[105][1], \" \", board.coordinates[106][1], \" \", board.coordinates[107][1], \" \", )\n print(\" \")\n print(board.coordinates[84][1], \" \", board.coordinates[85][1], \" \", board.coordinates[86][1], \" \", board.coordinates[87][1], \" \", board.coordinates[88][1], \" \", board.coordinates[89][1], \" \", board.coordinates[90][1], \" \", board.coordinates[91][1], \" \", board.coordinates[92][1], \" \", board.coordinates[93][1], \" \", board.coordinates[94][1], \" \", board.coordinates[95][1], \" \", )\n print(\" \")\n print(board.coordinates[72][1], \" \", board.coordinates[73][1], \" \", board.coordinates[74][1], \" \", board.coordinates[75][1], \" \", board.coordinates[76][1], \" \", board.coordinates[77][1], \" \", board.coordinates[78][1], \" \", board.coordinates[79][1], \" \", board.coordinates[80][1], \" \", board.coordinates[81][1], \" \", board.coordinates[82][1], \" \", board.coordinates[83][1], \" \", )\n print(\" \")\n print(board.coordinates[60][1], \" \", board.coordinates[61][1], \" \", board.coordinates[62][1], \" \", board.coordinates[63][1], \" \", 
board.coordinates[64][1], \" \", board.coordinates[65][1], \" \", board.coordinates[66][1], \" \", board.coordinates[67][1], \" \", board.coordinates[68][1], \" \", board.coordinates[69][1], \" \", board.coordinates[70][1], \" \", board.coordinates[71][1], \" \", )\n print(\" \")\n print(board.coordinates[48][1], \" \", board.coordinates[49][1], \" \", board.coordinates[50][1], \" \", board.coordinates[51][1], \" \", board.coordinates[52][1], \" \", board.coordinates[53][1], \" \", board.coordinates[54][1], \" \", board.coordinates[55][1], \" \", board.coordinates[56][1], \" \", board.coordinates[57][1], \" \", board.coordinates[58][1], \" \", board.coordinates[59][1], \" \", )\n print(\" \")\n print(board.coordinates[36][1], \" \", board.coordinates[37][1], \" \", board.coordinates[38][1], \" \", board.coordinates[39][1], \" \", board.coordinates[40][1], \" \", board.coordinates[41][1], \" \", board.coordinates[42][1], \" \", board.coordinates[43][1], \" \", board.coordinates[44][1], \" \", board.coordinates[45][1], \" \", board.coordinates[46][1], \" \", board.coordinates[47][1], \" \", )\n print(\" \")\n print(board.coordinates[24][1], \" \", board.coordinates[25][1], \" \", board.coordinates[26][1], \" \", board.coordinates[27][1], \" \", board.coordinates[28][1], \" \", board.coordinates[29][1], \" \", board.coordinates[30][1], \" \", board.coordinates[31][1], \" \", board.coordinates[32][1], \" \", board.coordinates[33][1], \" \", board.coordinates[34][1], \" \", board.coordinates[35][1], \" \", )\n print(\" \")\n print(board.coordinates[12][1], \" \", board.coordinates[13][1], \" \", board.coordinates[14][1], \" \", board.coordinates[15][1], \" \", board.coordinates[16][1], \" \", board.coordinates[17][1], \" \", board.coordinates[18][1], \" \", board.coordinates[19][1], \" \", board.coordinates[20][1], \" \", board.coordinates[21][1], \" \", board.coordinates[22][1], \" \", board.coordinates[23][1], \" \", )\n print(\" \")\n print(board.coordinates[0][1], \" \", board.coordinates[1][1], \" \", board.coordinates[2][1], \" \", board.coordinates[3][1], \" \", board.coordinates[4][1], \" \", board.coordinates[5][1], \" \", board.coordinates[6][1], \" \", board.coordinates[7][1], \" \", board.coordinates[8][1], \" \", board.coordinates[9][1], \" \", board.coordinates[10][1], \" \", board.coordinates[11][1], \" \", )\n print(\" \")", "def calculate_next_board_state(self):\n new_board_state = np.zeros_like(self.board_state)\n\n for x in range(self.board_size[0]):\n for y in range(self.board_size[0]):\n new_board_state[x][y] = self.next_state_of_cell(x,y)\n \n self.set_state(new_board_state)", "def update_board(self):\n for x in self.board:\n for f in x:\n if f.status == 0:\n if f.name == \"conway\":\n assert type(self.population)==int\n if f.live_neighbors == 3:\n f.symbol =\"*\"\n f.status = 1\n self.population += 1\n elif f.name == \"fredkin\":\n if f.live_neighbors == 1 or f.live_neighbors == 3 :\n f.status = 1\n f.symbol = str(f.age)\n self.population += 1\n else:\n f.status = 0\n\n elif f.status == 1:\n if f.name == \"conway\":\n assert type(self.population)==int\n #assert type(f.status)== 1\n if not((f.live_neighbors == 2 or f.live_neighbors == 3)):\n f.symbol = \".\"\n f.status = 0\n else:\n self.population += 1\n elif f.name == \"fredkin\":\n if f.live_neighbors == 1 or f.live_neighbors == 3:\n f.status = 1\n f.age += 1\n if f.age <= 2:\n f.symbol = str(f.age)\n self.population += 1\n else:\n self.board.replace(f, Conway_Cell(\"*\"))\n else:\n f.status = 0\n f.symbol = \"-\"", 
"def place(self, board):\r\n self.board = board", "def reset_board(self):\n\n self.board = np.array(self.initial_board)", "def play_turn(self, cur_board):\n pass", "def main_function(board):\r\n print(\"Welcome to tic-tac-toe game!\")\r\n printboard.print_board(board)\r\n if check.is_board_full(board):\r\n print(\"Tie game\")\r\n while not (check.is_board_full(board)):\r\n if not (winner.is_winner(board, 'O')):\r\n playermove.player_move(board)\r\n printboard.print_board(board)\r\n\r\n if check.is_board_full(board):\r\n print(\"Tie game\")\r\n break\r\n else:\r\n print(\"You loose! try again\")\r\n break\r\n\r\n if not (winner.is_winner(board, 'X')):\r\n move = computermove.computer_move(board)\r\n\r\n if move == 0:\r\n print(\" \")\r\n else:\r\n insertletter.insert_letter(board, 'O', move)\r\n print('computer placed an o on position', move, ':')\r\n printboard.print_board(board)\r\n if check.is_board_full(board):\r\n print(\"Tie game\")\r\n break\r\n else:\r\n print(\"You win!\")\r\n break", "def start_game() -> None:\n rows = get_int()\n cols = get_int()\n state = game.GameState(rows, cols)\n\n line = next_line()\n if line == 'CONTENTS':\n rowList = []\n for i in range(rows):\n row = []\n line = raw_next_line()\n for index in range(cols):\n row.append(line[index])\n rowList.append(row)\n state.set_board_contents(rowList)\n\n while True:\n _display_board(state)\n line = next_line()\n if line == 'Q':\n return\n if line == '':\n if state.tick():\n _display_board(state)\n break\n else:\n _process_command(line, state)\n print('GAME OVER')", "def new_board(self):\n\n # delete all objects\n self.canvas.delete('all')\n\n # reset\n self.board = [\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY]]\n\n # draw grid\n for n in range(1, 3):\n # vertical\n self.canvas.create_line(\n self.CELL_SIZE*n, 0,\n self.CELL_SIZE*n, self.WINDOW_SIZE,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)\n # horizontal\n self.canvas.create_line(\n 0, self.CELL_SIZE*n,\n self.WINDOW_SIZE, self.CELL_SIZE*n,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)", "def callback(self, message):\n rospy.sleep(0.1)\n self.listener.mutex.acquire()\n \n # transform pieces\n pieces_transformed = list()\n for piece in message.pieces:\n ps = PoseStamped()\n ps.header.frame_id = piece.header.frame_id\n ps.pose = piece.pose\n pose = self.listener.transformPose(\"chess_board_raw\", ps)\n #print ps, pose\n piece.pose = pose.pose\n piece.header.frame_id = \"chess_board_raw\"\n #col = self.board.getColName(int(piece.pose.position.x/SQUARE_SIZE))\n #rank = int(piece.pose.position.y/SQUARE_SIZE) + 1\n #print col,rank\n \n #print message.pieces\n\n # iterate through ways board could be inproperly localized\n\tprob_piece_gone = list() # locations moved from\n\tprob_piece_new = list() # locations moved to\n\tprob_piece_color = list() # locations that have changed color\n prob_cnt = 0;\n for x_off in [0,-1,1]:\n for y_off in [0,-1,1]:\n piece_gone = list() # locations moved from\n piece_new = list() # locations moved to\n piece_color = list() # locations that have changed color\n # process ChessBoard message\n rospy.loginfo(\"x_off: %s ; y_off: %s\" % (x_off,y_off))\n temp_board = BoardState(self.board.side)\n for piece in message.pieces:\n # get col, rank as \"x0\"\n if self.board.side == self.board.WHITE or self.board.side == None:\n col = self.board.getColName(int(piece.pose.position.x/SQUARE_SIZE))\n rank = int(piece.pose.position.y/SQUARE_SIZE + y_off) + 1\n else:\n 
col = self.board.getColName(7 - int(piece.pose.position.x/SQUARE_SIZE + x_off))\n rank = 8 - int(piece.pose.position.y/SQUARE_SIZE + y_off)\n if not self.board.valid(col, rank):\n print \"invalid: \", col, rank\n continue\n\n # update temp board\n if temp_board.getPiece(col, rank) == None:\n p = self.board.getPiece(col, rank)\n if p == None and not self.board.side == None:\n piece_new.append([col, rank, piece]) \n rospy.loginfo(\"Piece moved to: %s%s\" % (col,str(rank)))\n temp_board.setPiece(col, rank, piece)\n\n # see how board has changed\n move_from_cnt = 0\n captured_cnt = 0\n no_move_cnt = 0\n for col in 'abcdefgh':\n for rank in [1,2,3,4,5,6,7,8]:\n old = self.board.getPiece(col,rank)\n new = temp_board.getPiece(col,rank) \n if new == None and old != None:\n # this piece is gone!\n piece_gone.append([col, rank, old])\n rospy.loginfo(\"Piece moved from: %s%s\" % (col,str(rank)))\n move_from_cnt = move_from_cnt + 1\n # elif old != None and new != None and new.type/abs(float(new.type)) != old.type/abs(float(old.type)):\n elif old != None and new != None and new.type/abs(float(new.type)) != old.type/abs(float(old.type)):\n # capture!\n piece_color.append([col, rank, new])\n rospy.loginfo(\"Piece captured: %s%s %s %s\" % (col,str(rank),new.type, old.type)) \n captured_cnt = captured_cnt + 1\n elif old != None and new != None:\n # boring, but update types!\n new.type = old.type\n temp_board.setPiece(col,rank,new)\n rospy.loginfo(\"No Piece moved\")\n no_move_cnt = no_move_cnt + 1\n\n rospy.loginfo(\"xoff %d y %d from %d capt %d no_move %d\" % (x_off, y_off, move_from_cnt, captured_cnt, no_move_cnt))\n # ARD\n # ARD\n # ARD\n if len(piece_new) + len(piece_color) <= 5:\n for entry in piece_gone:\n\t prob_piece_gone.append( = list() # locations moved from\n\tprob_piece_new = list() # locations moved to\n\tprob_piece_color = list() # locations that have changed color\n prob_cnt = 0;\n\n # ARD\n # ARD\n # ARD\n # plausibility test: there can only be one change or new piece\n if self.board.side == None:\n temp_board.printBoard()\n if len(piece_color) + len(piece_new) == 0 and len(piece_gone) == 0:\n rospy.loginfo(\"No side set, but we are probably white.\")\n self.board.last_move = \"none\"\n return self.setWithOffset(x_off, y_off, temp_board)\n\n elif len(piece_color) >= 32 and len(piece_new) == 0:\n rospy.loginfo(\"No side set, but we are probably black.\")\n self.board.last_move = \"none\"\n return self.setWithOffset(x_off, y_off, temp_board)\n\n else:\n rospy.loginfo(\"Try again, %d\" % (len(piece_new) + len(piece_color))) \n self.board.last_move = \"fail\"\n self.up_to_date = True\n self.listener.mutex.release()\n return\n\n elif len(piece_new) + len(piece_color) > 5:\n # try another offset\n rospy.loginfo(\"Try again, %d %d \" % (len(piece_new),len(piece_color)))\n continue\n elif len(piece_new) + len(piece_color) != 1:\n # castling\n self.board.castling_move = self.board.isCastling(piece_new, piece_color, piece_gone)\n if self.board.castling_move != None:\n # castling\n rospy.loginfo(\"Castling, %s\" % self.board.castling_move)\n\n m = self.board.castling_move\n print m\n #self.copyType(m[0], m[1], m[2], m[3], temp_board)\n to = ChessPiece()\n to.type = self.board.side * ChessPiece.BLACK_KING\n temp_board.setPiece(m[2],int(m[3]),to)\n\n m = castling_extras[m]\n print m\n #self.copyType(m[0], m[1], m[2], m[3], temp_board)\n to = ChessPiece()\n to.type = self.board.side * ChessPiece.BLACK_ROOK\n temp_board.setPiece(m[2],int(m[3]),to)\n self.board.previous = [self.board.values, 
self.board.last_move] \n self.board.last_move = self.board.castling_move\n return self.setWithOffset(x_off, y_off, temp_board)\n\n rospy.loginfo(\"Try again, %d\" % (len(piece_new) + len(piece_color))) \n self.board.last_move = \"fail\"\n self.up_to_date = True\n self.listener.mutex.release()\n return\n\n # if our pieces are missing, fill them in (bishops!)\n candidates = list()\n for entry in piece_gone:\n (col, rank, piece) = entry\n if piece.type/abs(piece.type) == self.board.side:\n # fill in\n temp_board.setPiece(col, rank, self.board.getPiece(col,rank))\n rospy.loginfo(\"fill in %s%s\" % (col,str(rank))) \n else:\n candidates.append(entry)\n if len(candidates) == 0:\n rospy.loginfo(\"Try again, no candidates\") \n self.board.last_move = \"fail\"\n self.up_to_date = True\n self.listener.mutex.release()\n return\n if len(candidates) > 1: # too many possibilities (added 3AM on Wednesday!)\n rospy.loginfo(\"Try again, too many candidates %d\" % len(candidates)) \n self.board.last_move = \"fail\"\n self.up_to_date = True\n self.listener.mutex.release()\n return\n \n # find the corresponding piece\n if len(piece_new) == 1:\n piece_to = piece_new[0]\n else:\n piece_to = piece_color[0]\n piece_fr = candidates[0]\n # update type\n self.board.copyType(piece_fr[0], piece_fr[1], piece_to[0], piece_to[1], temp_board)\n \n # set outputs\n self.board.previous = [self.board.values, self.board.last_move] \n self.board.last_move = piece_fr[0] + str(piece_fr[1]) + piece_to[0] + str(piece_to[1])\n rospy.loginfo(\"last move: %s\" % self.board.last_move)\n return self.setWithOffset(x_off,y_off,temp_board)\n # no match\n\trospy.loginfo(\"Try again; no match\")\n\tself.board.last_move = \"fail\"\n\tself.up_to_date = True\n\tself.listener.mutex.release()\n\treturn", "def __init__(self):\n self.board = [[0 for i in range(9)]]*9\n self.board = [[0, 0, 0, 0, 3, 0, 9, 0, 0],\n [0, 0, 3, 0, 8, 0, 0, 0, 7],\n [6, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 5, 8, 3, 6, 0, 0, 0, 0],\n [0, 1, 0, 8, 9, 4, 0, 6, 0],\n [0, 0, 0, 0, 2, 7, 8, 4, 0],\n [0, 0, 9, 0, 0, 0, 0, 0, 8],\n [7, 0, 0, 0, 4, 0, 6, 0, 0],\n [0, 0, 5, 0, 1, 0, 0, 0, 0]]", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n if self.id == 1:\n opponent_id = 2\n else:\n opponent_id = 1\n\n maxvalue = 100000\n minvalue = -maxvalue\n winner = board.winner()\n if winner == self.id:\n return maxvalue\n elif winner == opponent_id:\n return minvalue\n size_y = board.height\n size_x = board.width\n map_ = []\n num_to_connect = board.num_to_connect\n total_points = 0\n\n multiply_reachable = 1\n multiply_oddeven = 1\n # basically this function is calculating all the possible win positions\n # more pieces in a possible win position will be counted with more weights\n # a win position with X pieces in it will be counted as X^2 points\n # initialise the zones maps\n for i in range(size_y):\n map_.append([])\n for j in range(size_x):\n map_[i].append([])\n\n # Fill in the horizontal win positions\n for i in range(size_y):\n 
for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i][j + k] == self.id:\n points += len(board.winning_zones[j+k][i])\n if (self.id == 1 and i % 2 == 1) or (self.id == 2 and i%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return maxvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return minvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the vertical win positions\n for i in range(size_x):\n for j in range(size_y - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[j + k][i] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[j + k][i] == self.id:\n points += len(board.winning_zones[i][j+k])\n if (self.id == 1 and (j+k) % 2 == 1) or (self.id == 2 and (j+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n points *= multiply_reachable\n # if opponent_pieces_count == 3 and self_pieces_count == 0:\n # points *= -1\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the forward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j + k] == self.id:\n points += len(board.winning_zones[j+k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 
and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the backward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - 1, num_to_connect - 1 - 1, -1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j - k] == self.id:\n points += len(board.winning_zones[j-k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n points *= multiply_reachable\n\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n return total_points", "def printBoard(self):\n if self.side == self.WHITE or self.side == None:\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r) # print a8 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n else:\n for r in [1,2,3,4,5,6,7,8]:\n for c in 'hgfedcba':\n p = self.getPiece(c,r) # print h1 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"", "def init_new_board(self) -> None:\r\n\r\n TkState.enable(self.settings_menu.winfo_children())\r\n TkState.enable(self.edit_menu.winfo_children())\r\n TkState.enable([self.play_button, self.step_button])\r\n TkState.disable([self.reset_button])\r\n\r\n self.gen_number.config(text = 0)\r\n self.speed_scale.set(self.INITIAL_TIME_PER_GEN)\r\n self.zoom_scale.set(self.INITIAL_ZOOM)\r\n\r\n self.animator.board = self.anim_board\r\n self.painter.board = self.anim_board\r\n self.painter.adjust_to_canvas()", "def reset(self):\n self.board = np.zeros((8, 8), dtype=np.int)\n self.done = False\n self.actions = []\n self.turn = 0\n for i in range(8):\n for j in range(8):\n if (i+j)%2!=0:\n if i<3:\n 
self.board[i,j]=1\n if i==2:\n moves=(self.fdiag(i,j),self.fadiag(i,j))\n for r in range(len(moves)):\n if moves[r] is not None:\n self.actions.append(moves[r])\n if i>4:\n self.board[i,j]=-1", "def display(self):\n board_dict = dict()\n if self.name == 'two_players_double_board':\n # fill all cells of board_dict with \" \" for empty cells\n letters = [chr(i+97) for i in range(0, 16)]\n for number in range(0, 12):\n for letter in letters:\n position = letter + str(number + 1)\n board_dict[position] = ' '\n\n board_dict = self.set_piece_in_cell(board_dict)\n # first line of board\n board_str = \" |\"\n for i in range(0, 16):\n board_str += chr(i + 97).ljust(2, ' ') + \" | \"\n\n print(board_str)\n\n # print board\n for number in range(0, 12):\n print(\"-\" * 82)\n print(str(number + 1).rjust(2, ' '), end=\"|\")\n for letter in letters:\n position = letter + str(number + 1)\n piece = board_dict[position]\n print(str(piece).ljust(2, ' ') + ' |', end=\" \")\n print()\n print(\"-\" * 82)\n print(\"\\n\")\n print(\"END OF TWO_PLAYERS BOARD\")\n\n elif self.name == \"four_players_board\":\n # fill all cells of board_dict with \" \" for empty cells\n letters = [chr(i+97) for i in range(0, 14)]\n for number in range(0, 14):\n for letter in letters:\n position = letter + str(number + 1)\n board_dict[position] = ' '\n\n board_dict = self.set_piece_in_cell(board_dict)\n # first line of board\n board_str = \" |\"\n for i in range(0, 14):\n board_str += chr(i + 97).ljust(2, ' ') + \" | \"\n\n print(board_str)\n\n empty_letters, empty_numbers = ['a', 'b', 'c', 'l', 'm', 'n'], ['1', '2', '3', '12', '13', '14']\n empty_cells_tuples = list(itertools.product(empty_letters, empty_numbers))\n empty_cells = []\n for tupl in empty_cells_tuples:\n empty_cells.append(tupl[0] + tupl[1])\n # print board\n for number in range(0, 14):\n print(\"-\" * 76)\n print(str(number + 1).rjust(2, ' '), end=\"|\")\n for letter in letters:\n position = letter + str(number + 1)\n piece = board_dict[position]\n if position not in empty_cells:\n print(str(piece).ljust(2, ' ') + ' |', end=\" \")\n\n else:\n if position.startswith('c'):\n print(' ', end='| ')\n\n else:\n print(' ', end=' ')\n\n print()\n print(\"-\" * 76)\n print(\"\\n\")\n print(\"END OF FOUR_PLAYERS BOARD\")\n\n elif self.name == \"three_players_board\":\n # fill all cells of board_dict with \" \" for empty cells\n letters = [chr(i + 97) for i in range(0, 22)]\n for number in range(0, 22):\n for letter in letters:\n position = letter + str(number + 1)\n board_dict[position] = ' '\n\n # first line of board\n board_str = \" |\"\n for i in range(0, 22):\n board_str += chr(i + 97).ljust(2, ' ') + \" | \"\n\n print(board_str)\n\n empty_cells = []\n for i in range(7):\n for j in range(1, 7 - i + 1):\n position = letters[i] + str(j)\n empty_cells.append(position)\n\n for j in range(1, i + 2):\n position = letters[i + 15] + str(j)\n empty_cells.append(position)\n\n for j in range(10 + i, 17):\n position = letters[i] + str(j)\n empty_cells.append(position)\n\n for j in range(16 - i, 17):\n position = letters[i + 15] + str(j)\n empty_cells.append(position)\n\n board_dict = self.set_piece_in_cell(board_dict)\n\n # print board\n for number in range(0, 16):\n print(\"-\" * 106)\n print(str(number + 1).rjust(2, ' '), end=\"|\")\n for letter in letters:\n position = letter + str(number + 1)\n piece = board_dict[position]\n if position not in empty_cells:\n print(str(piece).ljust(2, ' ') + ' |', end=\" \")\n\n\n else:\n if position == 'g1' or position == 'g16':\n print(' ', end='| 
')\n elif position.startswith('c'):\n print(' ', end=' ')\n\n else:\n print(' ', end=' ')\n\n print()\n print(\"-\" * 106)\n print(\"\\n\")\n print(\"END OF THREE_PLAYERS BOARD\")", "def print_board(board):\n\n colors = {\n '*': None,\n '2': 'red',\n '4': 'green',\n '8': 'yellow',\n '16': 'blue',\n '32': 'magenta',\n '64': 'cyan',\n '128': 'grey',\n '256': 'white',\n '512': 'green',\n '1024': 'red',\n '2048': 'blue',\n '4096': 'magenta'\n };\n header = \"Use the arrows keys to play 2048! Press q to quit\";\n print(header);\n N = len(board);\n vertical_edge = \"\";\n for i in range(N + 2):\n vertical_edge += \"-\\t\";\n print(vertical_edge);\n for y in range(N):\n row = \"\";\n for x in board[y]:\n\n # Handling installation fail (no colors printed)\n if termcolor is not None:\n row += termcolor.colored(x, colors[x]);\n else:\n row += x\n\n row += \"\\t\";\n print(\"|\\t\" + row + \"|\");\n if y is not N - 1: print(\"\")\n print(vertical_edge);\n\n if GUI_runnable:\n gui.update_grid(board)\n gui.update()", "def postion_fleet(self, starting_cell, board):\n pass", "def draw_board(board_state):\n print(\" {} | {} | {} \".format(board_state[6], board_state[7], board_state[8]))\n print(\"-----------\")\n print(\" {} | {} | {} \".format(board_state[3], board_state[4], board_state[5]))\n print(\"-----------\")\n print(\" {} | {} | {} \".format(board_state[0], board_state[1], board_state[2]))", "def __init__(self):\n\t\tself.current = Piece.EX\n\t\tself.board = [Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK]", "def Restart():\n ResetBoard()\n global P\n global Board\n global cur\n global final\n if cur <= len(Level)-1:\n for i in range(size):\n for j in range(size):\n Board[i][j] = Level[cur][i][j]\n \n for i in range(size):\n for j in range(size):\n if Board[i][j]==\"P\":\n P = Player(i+1, j+1)\n elif Board[i][j]==\"W\":\n M = Pig(i+1, j+1, 0)\n M.symbol = Board[i][j]\n elif Board[i][j]==\"3\":\n M = Pig(i+1, j+1, 1)\n M.symbol = Board[i][j]\n elif Board[i][j]==\"E\":\n M = Pig(i+1, j+1, 2)\n M.symbol = Board[i][j]\n elif Board[i][j]==\"M\":\n M = Pig(i+1, j+1, 3)\n M.symbol = Board[i][j]\n elif Board[i][j]==\"B\":\n Slime(i+1, j+1)\n elif Board[i][j]==\"A\":\n Skull(i+1, j+1)\n elif Board[i][j]==\"N\":\n Stone(i+1, j+1)\n elif Board[i][j]==\"D\":\n Octopus(i+1, j+1)\n elif Board[i][j]==\"S\":\n Wall(i+1, j+1)\n elif Board[i][j]==\"F\":\n final = (i+1,j+1)\n else:\n global endless\n endless = True\n Board = []\n InitBoard(10)\n P = Player(2,4)\n cur+=3\n genMobs(cur)", "def reset_board():\n board = initial_state()\n emit(\"update\", board)", "def callback(self, message):\n # no need to update if already up to date\n if self.up_to_date == True:\n return\n\n # update transform\n self.transform = message.board_to_fixed\n\n piece_gone = list() # locations moved from\n piece_new = list() # locations moved to\n piece_color = list() # locations that have changed color\n\n # process ChessBoard message\n temp_board = BoardState(self.board.side)\n for piece in message.pieces:\n # get col, rank as \"x0\"\n if self.board.side == self.board.WHITE or self.board.side == None:\n col = self.board.getColName(int(piece.pose.position.x/SQUARE_SIZE))\n rank = int(piece.pose.position.y/SQUARE_SIZE) + 1\n else:\n col = self.board.getColName(7 - int(piece.pose.position.x/SQUARE_SIZE))\n rank = 8 - int(piece.pose.position.y/SQUARE_SIZE)\n if not self.board.valid(col, rank):\n print \"invalid: \", col, rank\n continue\n\n # update temp board\n if 
temp_board.getPiece(col, rank) == None:\n p = self.board.getPiece(col, rank)\n if p == None and not self.board.side == None:\n piece_new.append([col, rank, piece])\n rospy.loginfo(\"Piece moved to: %s%s\" % (col,str(rank)))\n temp_board.setPiece(col, rank, piece)\n\n # see how board has changed\n for col in 'abcdefgh':\n for rank in [1,2,3,4,5,6,7,8]:\n old = self.board.getPiece(col,rank)\n new = temp_board.getPiece(col,rank)\n if new == None and old != None:\n # this piece is gone!\n piece_gone.append([col, rank, old])\n rospy.loginfo(\"Piece moved from: %s%s\" % (col,str(rank)))\n elif old != None and new != None and new.type/abs(float(new.type)) != old.type/abs(float(old.type)):\n # capture!\n piece_color.append([col, rank, new])\n rospy.loginfo(\"Piece captured: %s%s\" % (col,str(rank)))\n elif old != None and new != None:\n # boring, but update types!\n new.type = old.type\n new.header.frame_id = old.header.frame_id\n temp_board.setPiece(col,rank,new)\n\n # plausibility test: there can only be one change or new piece\n if self.board.side == None:\n temp_board.printBoard()\n if len(piece_color) + len(piece_new) == 0 and len(piece_gone) == 0:\n rospy.loginfo(\"No side set, but we are probably white.\")\n self.board.last_move = \"none\"\n return self.setBoard(temp_board)\n\n elif len(piece_color) >= 32 and len(piece_new) == 0:\n rospy.loginfo(\"No side set, but we are probably black.\")\n self.board.last_move = \"none\"\n return self.setBoard(temp_board)\n\n else:\n rospy.logdebug(\"Try again, %d\" % (len(piece_new) + len(piece_color)))\n self.board.last_move = \"fail\"\n return\n\n elif len(piece_new) + len(piece_color) != 1:\n # castling\n self.board.castling_move = self.board.isCastling(piece_new, piece_color, piece_gone)\n if self.board.castling_move != None:\n # castling\n rospy.loginfo(\"Castling, %s\" % self.board.castling_move)\n\n m = self.board.castling_move\n print m\n #self.copyType(m[0], m[1], m[2], m[3], temp_board)\n to = ChessPiece()\n to.type = self.board.side * ChessPiece.BLACK_KING\n if to.type > 0:\n to.header.frame_id = \"wking\"\n else:\n to.header.frame_id = \"bking\"\n temp_board.setPiece(m[2],int(m[3]),to)\n\n m = castling_extras[m]\n print m\n #self.copyType(m[0], m[1], m[2], m[3], temp_board)\n to = ChessPiece()\n to.type = self.board.side * ChessPiece.BLACK_ROOK\n if to.type > 0:\n if m[0] == 0:\n to.header.frame_id = \"wrook0\"\n else:\n to.header.frame_id = \"wrook1\"\n else:\n if m[0] == 0:\n to.header.frame_id = \"brook0\"\n else:\n to.header.frame_id = \"brook1\"\n temp_board.setPiece(m[2],int(m[3]),to)\n self.board.previous = [self.board.values, self.board.last_move]\n self.board.last_move = self.board.castling_move\n return self.setBoard(temp_board)\n\n rospy.logdebug(\"Try again, %d\" % (len(piece_new) + len(piece_color)))\n self.board.last_move = \"fail\"\n return\n\n # if our pieces are missing, fill them in (bishops!)\n candidates = list()\n for entry in piece_gone:\n (col, rank, piece) = entry\n if piece.type/abs(piece.type) == self.board.side:\n # fill in\n temp_board.setPiece(col, rank, self.board.getPiece(col,rank))\n else:\n candidates.append(entry)\n if len(candidates) == 0:\n rospy.loginfo(\"Try again, no candidates\")\n self.board.last_move = \"fail\"\n return\n if len(candidates) > 1: # too many possibilities (added 3AM on Wednesday!)\n rospy.loginfo(\"Try again, too many candidates %d\" % len(candidates))\n self.board.last_move = \"fail\"\n return\n\n # find the corresponding piece\n if len(piece_new) == 1:\n piece_to = piece_new[0]\n 
else:\n # remove the old piece from the planning scene\n self.last_capture = self.board.getPieceId(self.board.getPiece(piece_color[0][0],piece_color[0][1]))\n piece_to = piece_color[0]\n piece_fr = candidates[0]\n # update type\n self.board.copyType(piece_fr[0], piece_fr[1], piece_to[0], piece_to[1], temp_board)\n\n # set outputs\n self.board.previous = [self.board.values, self.board.last_move]\n self.board.last_move = piece_fr[0] + str(piece_fr[1]) + piece_to[0] + str(piece_to[1])\n return self.setBoard(temp_board)", "def start():\n display_board()\n print(\"\\n\")\n y_n_prompt()", "def updated_board(board_w, board_h, piece_list, board, position):\n board_state = board.state\n new_board = Board(board_w, board_h, 1, piece_list, position)\n new_board.state = board_state\n return new_board", "def __init__(self):\n self._board = [\n\n ['', '', '', \"x\", '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n [\"o\", '', \"o\", '', \"o\", '', \"o\", ''],\n ]\n self._game_state = \"UNFINISHED\" # default game state\n self._current_row = 0 #helper used to enforce moving one row at a time\n self._current_x_row = 0 # tracks x's row coordinate\n self._current_x_column = 3 # tracks x's column coordinate\n\n #four coordinates tracking the available diagonal spaces of x\n self._lower_right = (self._current_x_row + 1, self._current_x_column + 1)\n self._lower_left = (self._current_x_row + 1, self._current_x_column - 1)\n self._upper_right = (self._current_x_row - 1, self._current_x_column + 1)\n self._upper_left = (self._current_x_row - 1, self._current_x_column - 1)\n\n #helper used to check if x is in the first column\n self._row1 = (\n self._board[0][0],\n self._board[1][0],\n self._board[2][0],\n self._board[3][0],\n self._board[4][0],\n self._board[5][0],\n self._board[6][0],\n self._board[7][0])\n #helper used to check if x is in the last column\n self._row7 = (\n self._board[0][7],\n self._board[1][7],\n self._board[2][7],\n self._board[3][7],\n self._board[4][7],\n self._board[5][7],\n self._board[6][7],\n self._board[7][7])", "def init_board():\n board = ['#', 1, 2, 3, 4, 5, 6, 7, 8, 9]\n return board", "def applyMove(self, (from_row,from_col), (to_row,to_col)):\n newboard = deepcopy(self)\n piece = newboard.board[from_row][from_col]\n newboard.board[from_row][from_col] = None\n newboard.board[to_row][to_col] = piece\n newboard.toplay = 'BLACK' if self.toplay == 'WHITE' else 'WHITE'\n return newboard", "def _update_board(self, start: (int, int), dest: (int, int), extra_info=''):\n\n piece = self.board[start[0]][start[1]]\n\n # Move the piece itself\n self.board[dest[0]][dest[1]] = piece\n self.board[start[0]][start[1]] = EMPTY_SPACE\n\n # Special moves\n if extra_info:\n\n # Castling kingside\n if extra_info == CASTLE_KINGSIDE:\n row = self._get_castling_row()\n\n # We already moved the king, so we just need to move the rook\n self.board[row][5] = self.board[row][7]\n self.board[row][7] = EMPTY_SPACE\n\n elif extra_info == CASTLE_QUEENSIDE:\n row = self._get_castling_row()\n\n # King already moved, so just update the rook\n self.board[row][3] = self.board[row][0]\n self.board[row][0] = EMPTY_SPACE\n\n else: # Pawn promotion\n self.board[dest[0]][dest[1]] = extra_info\n\n # en passant\n self._update_en_passant(start, dest, piece)" ]
[ "0.72907543", "0.711742", "0.6935223", "0.686896", "0.68323153", "0.6819589", "0.6773056", "0.6760123", "0.6743392", "0.6696341", "0.66691166", "0.6662628", "0.66314083", "0.6610427", "0.6580219", "0.6562503", "0.65226245", "0.6518735", "0.6464358", "0.64634323", "0.64540416", "0.6449444", "0.644684", "0.64343274", "0.6428283", "0.64280486", "0.6417007", "0.6388707", "0.6385433", "0.63709253", "0.6366341", "0.6355643", "0.63357747", "0.63332385", "0.6327222", "0.6326731", "0.6323989", "0.6313007", "0.6299683", "0.6297791", "0.6295983", "0.6291595", "0.62859476", "0.6285269", "0.6278348", "0.62775725", "0.6274462", "0.6272476", "0.6263888", "0.62341434", "0.6230609", "0.62158763", "0.62119573", "0.62108463", "0.6203667", "0.62029946", "0.62029946", "0.6199455", "0.619747", "0.6194865", "0.61909527", "0.6183302", "0.61828965", "0.61778855", "0.6176692", "0.6173906", "0.61617464", "0.6160786", "0.6143672", "0.61427164", "0.6138899", "0.6134118", "0.6130727", "0.6113673", "0.61136276", "0.61127657", "0.6109632", "0.610875", "0.6106611", "0.6099793", "0.60945934", "0.6094284", "0.6089584", "0.60813344", "0.6072772", "0.60695994", "0.6061841", "0.606135", "0.60611624", "0.60598755", "0.60545546", "0.6039627", "0.6034149", "0.60300124", "0.60205245", "0.6020151", "0.60159796", "0.6005206", "0.60048807", "0.6003089", "0.5999312" ]
0.0
-1
Function that calculates the biodiversity score based on the Biosafe output. The numbers 29.33 and 1.4349 follow from running MC simulations to determine the lowest and highest possible scores. The biodiversity score reflects the 0-100% range between the two.
def set_score(self):
    if self.PotTax_intervention is None:
        if self.PotTax_reference is not None:
            self.score = (((self.PotTax_reference.sum().TFI - 29.33) / 1.4349) / 100)
        else:
            print("There is no Biosafe output to score")
            return
    else:
        self.score = (((self.PotTax_intervention.sum().TFI - 29.33) / 1.4349) / 100)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval", "def calc_score(score):\n if not score:\n return 0\n dbot_score = 1\n if score >= 95:\n dbot_score = 3\n elif score >= 75:\n dbot_score = 2\n return dbot_score", "def get_score(score_map, test_result):\n if test_result < score_map[20]:\n return int((test_result / score_map[20]) * 20)\n elif test_result < score_map[40]:\n return int(20 + (test_result - score_map[20]) / (score_map[40] - score_map[20]) * 20)\n elif test_result < score_map[60]:\n return int(40 + (test_result - score_map[40]) / (score_map[60] - score_map[40]) * 20)\n elif test_result < score_map[85]:\n return int(60 + (test_result - score_map[60]) / (score_map[85] - score_map[60]) * 20)\n elif test_result < score_map[100]:\n return int(85 + (test_result - score_map[85]) / (score_map[100] - score_map[85]) * 20)\n else:\n return 100", "def get_score(snack_data, percentage_data, snack, snack_query, protein_query, carb_query, fat_query):\n\tstart_time = time.time()\n\n\t#Load necessary data\n\t\"\"\"\twith open ('../../../Data/percentagesDict.pickle', 'rb') as f:\n\t\tpercentage_data = pickle.load(f)\n\n\twith open ('../../../Data/FINAL_snacks_data.pickle', 'rb') as f:\n\t\tsnack_data = pickle.load(f)\"\"\"\n\n\t#Set constants\n\tLOW_FAT = .3\n\tHIGH_FAT = .6\n\tLOW_CARB = .1\n\tHIGH_CARB = .2\n\tLOW_PRO = .2\n\tHIGH_PRO = .4\n\n\t#Convert macro percentages to 'high', 'med', 'low' categories\n\tfat = percentage_data[snack]['fat']\n\tprotein = percentage_data[snack]['protein']\n\tcarb = percentage_data[snack]['carb']\n\n\tif fat > HIGH_FAT:\n\t\tfat_content = 'high'\n\telif fat < LOW_FAT:\n\t\tfat_content = 'low'\n\telse:\n\t\tfat_content = 'med'\n\n\tif protein > HIGH_PRO:\n\t\tprotein_content = 'high'\n\telif protein < LOW_PRO:\n\t\tprotein_content = 'low'\n\telse:\n\t\tprotein_content = 'med'\n\n\tif carb > HIGH_CARB:\n\t\tcarb_content = 'high'\n\telif carb < LOW_CARB:\n\t\tcarb_content = 'low'\n\telse:\n\t\tcarb_content = 'med'\n\n\t#Set x values\n\tx1 = fat_query == fat_content\n\tx2 = carb_query == carb_content\n\tx3 = protein_query == protein_content\n\tx4 = cooccur(snack_data, snack, snack_query) \n\tx5 = snack_data[snack]['rating']\n\n\tw1 = 1\n\tw2 = 1\n\tw3 = 1\n\tw4 = 1\n\tw5 = 1\n\t\n\t#print('x1: {}, x2: {}, x3: {}, x4: {}, x5: {}'.format(x1, x2, x3, x4, x5))\n\t#print(\"get_score() time: --- %s seconds ---\" % (time.time() - start_time))\n\n\n\treturn w1*x1 + w2*x2 + w3*x3 + w4*x4 + w5*x5", "def get_score(self, a, b):\n ### FILL IN ###", "def get_h_score(start, end):\n #uses a heuristic function\n #return 0 #used if you want Djikstras algorithm\n return (abs(end[0]-start[0])+abs(end[1]-start[1])) * 10", "def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision 
= tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2", "def davies_bouldin_score(self):\r\n print(colored(\"The davies bouldin score of the clustering is %0.002f\\n\" %(davies_bouldin_score(self.X, self.labels)),color = 'red', attrs=['bold']))\r\n print()\r\n print(colored(\"The points in each cluster are : \",color = 'yellow', attrs=['bold']))\r\n print(collections.Counter(self.labels))", "def prob5(file = 'crime_data.csv'):\n #Question one\n data = pd.read_csv(file)\n my_list = data.columns[(data.mean()>1500000)][2:]\n \n #Get the correlation between the three crimes\n corr = data[my_list].corr()\n prop_max = data['Property'].max()\n larc_max = data[\"Larceny\"].max()\n ans1 = 'Property'\n \n #Question 2 get the ammount of aggravated assaults\n new_data = data[data['Year']>=2000]\n new_data = new_data.sort_values('Murder',ascending=True)\n agg_as = new_data['Aggravated Assault']\n agg_as = agg_as[agg_as>850000]\n ans2 = agg_as.values\n \n #Question 3 get the highest year of crime and get the percentage of that\n S = 10\n N = int(len(data)/S)\n \n #Split the decades\n frames = [ data.iloc[i*S:(i+1)*S].copy() for i in range(N+1) ]\n dec_crime = []\n for dec in frames:\n dec_crime.append(dec['Total'].mean())\n \n #Get the highest crime and its percentage of the total\n my_dec = frames[np.argmax(dec_crime)]\n my_crimes = ['Violent','Property','Murder','Forcible Rape','Robbery','Aggravated Assault','Burglary','Larceny','Vehicle Theft']\n high_crime = my_dec[my_crimes].mean().idxmax()\n ans3 = float(my_dec[high_crime].mean()/my_dec['Total'].mean())\n return(ans1,ans2,ans3)", "def disp_score():", "def __calculate_ethnic_diversity_score(project: dict, student: dict) -> int:\n # project_name = project[\"fields\"][PROJECT_NAME_FIELD]\n # student_name = student[\"fields\"][SURVEY_STUDENT_NAME_FIELD][0]\n\n # print(\"Calculating ethnic pairing score for: Project({}) - Student({})\".format(project_name, student_name))\n\n # Get the ethnicities specified by the student\n student_ethnicities = student[\"fields\"].get(SURVEY_ETHNICITIES_FIELD, None)\n if not student_ethnicities:\n # The student didn't specify ethnicities, so we can't calculate a score\n return 0\n\n # Get the list of current assignments for the project team\n team_assignments = __get_team_assignments(project)\n\n # This list will hold the list of ethnicities on the team\n team_ethnicities = []\n for assignment in team_assignments:\n assigned_student_ethnicities = assignment.student[\"fields\"].get(SURVEY_ETHNICITIES_FIELD, None)\n\n if assigned_student_ethnicities:\n team_ethnicities.append(assigned_student_ethnicities)\n\n # Team ethnicities is going to be a list of lists, so let's flatten it\n team_ethnicities = [item for sublist in team_ethnicities for item in sublist]\n\n # ================================================================================================================\n # Get the count ethnicities for the already assigned students\n 
ethnicity_counter = __get_ethnicity_counter()\n ethnicity_counter.update(team_ethnicities)\n\n # Check each of the student's listed ethnicities and take the highest score\n best_ethnicity_score = 0\n for student_ethnicity in student_ethnicities:\n matching_ethnicity_count = ethnicity_counter.get(student_ethnicity)\n\n current_ethnicity_score = 0\n\n if matching_ethnicity_count == 0:\n # This is good, as it will make the team more diverse\n current_ethnicity_score = SURVEY_BASE_ETHNICITY_WEIGHT\n elif matching_ethnicity_count == 1:\n # This is better, as it will pair students with like ethnicities\n current_ethnicity_score = SURVEY_BASE_ETHNICITY_WEIGHT * 2\n\n # Check to see if this is a better match\n if current_ethnicity_score > best_ethnicity_score:\n best_ethnicity_score = current_ethnicity_score\n\n return best_ethnicity_score", "def bridge_score(bridge):\n return (bridge_strength(bridge), len(bridge))", "def University_calculation(jobid):\r\n min_cgpa=90\r\n \"\"\"~~~~~~~~~\"\"\"\r\n dbconnect= connect_to_db()\r\n Candidate_qualifications=pd.read_sql(\"select candidate_id,university_name,institute_name,aggregate from candidate_qualification where candidate_id in(select candidate_id from master_id where job_id=\"+str(jobid)+\")\",con=dbconnect)\r\n College_data=pd.read_sql(\"select * from college_tiers\",con=dbconnect)\r\n Candidate_qualifications[\"aggregate\"]=Candidate_qualifications[\"aggregate\"].apply(lambda x: 0 if x<0 or x>100 else x)\r\n Candidate_qualifications[\"aggregate\"]=Candidate_qualifications[\"aggregate\"].apply(lambda x: x*10 if 5<x<10 else x)\r\n Candidate_qualifications[\"aggregate\"]=Candidate_qualifications[\"aggregate\"].apply(lambda x: 25*x if 0<x<4 else x)\r\n\r\n def Aggregate():\r\n Unique_candids=Candidate_qualifications[[\"candidate_id\",\"aggregate\"]].groupby(\"candidate_id\").mean()\r\n Unique_candids[\"aggregate\"]=Unique_candids[\"aggregate\"].apply(lambda x:x-min_cgpa)\r\n minval=min(Unique_candids[\"aggregate\"])\r\n maxval=max(Unique_candids[\"aggregate\"])\r\n Unique_candids[\"aggregate\"]=Unique_candids[\"aggregate\"].apply(lambda x:(x-minval)*100/(maxval-minval))\r\n Unique_candids=Unique_candids.reset_index()\r\n return Unique_candids\r\n \r\n def University_name():\r\n stop_words=[\"of\",\"on\",\"for\",\"the\",\"&\",\"and\"]\r\n unique_candids=list(np.unique(Candidate_qualifications[\"candidate_id\"]))\r\n candidate_univdict={}\r\n for i in unique_candids:\r\n candidate_univdict[i]=Candidate_qualifications[[\"university_name\",\"institute_name\"]][Candidate_qualifications[\"candidate_id\"]==i].values.tolist()\r\n candidate_univdict={k:list(map(lambda x:list(filter(lambda y:str(y).strip() not in[\"\",\"None\"],x)),v)) for k,v in candidate_univdict.items()}\r\n candidate_univdict={k: np.unique(list(itertools.chain.from_iterable(v))).tolist() for k,v in candidate_univdict.items()}\r\n for i in candidate_univdict.keys():\r\n for j in candidate_univdict[i]:\r\n if j in list(map(lambda x: str(x).lower(),College_data[\"College\"].tolist())):\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=list(College_data[\"Tier\"][College_data[\"College\"]==j])[0]\r\n continue;\r\n if j in list(map(lambda x: str(x).lower(),College_data[\"College\"].tolist())):\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=list(College_data[\"Tier\"][College_data[\"Ancronym\"]==j])[0]\r\n continue;\r\n else:\r\n Processed_collegedata=pd.DataFrame(College_data[\"College\"].apply(lambda x: [i for i in re.sub(\"[\\.-]\",\" \",x.lower()).split() if i not in 
stop_words]))\r\n Processed_collegedata[\"Ancronym\"]=College_data[\"Ancronym\"].apply(lambda x: [i for i in re.sub(\"[\\.-]\",\" \",x.lower()).split() if i not in stop_words])\r\n val=[w for w in re.sub(\"[\\.-]\",\" \",j.lower()).split() if w not in stop_words]\r\n Processed_collegedata[\"College\"]=Processed_collegedata[\"College\"].apply(lambda x:(len(set(val).intersection(set(x))))/len(set(val).union(set(x))))\r\n Processed_collegedata[\"Ancronym\"]=Processed_collegedata[\"Ancronym\"].apply(lambda x:(len(set(val).intersection(set(x))))/len(set(val).union(set(x))))\r\n maxval=Processed_collegedata.max().idxmax()\r\n if Processed_collegedata[maxval].idxmax()>0.5:\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=College_data.ix[Processed_collegedata[maxval].idxmax(),\"Tier\"]\r\n else:\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=3\r\n \r\n candidate_univdict={k:100/min(v) for k,v in candidate_univdict.items() if len(v)>0}\r\n College_score=pd.DataFrame.from_dict(candidate_univdict,orient=\"index\")\r\n College_score=College_score.reset_index()\r\n College_score.columns=[\"candidate_id\",\"Tier_score\"]\r\n return College_score\r\n result=pd.merge(Aggregate(),University_name(),how=\"outer\",on=\"candidate_id\")\r\n result=pd.merge(pd.DataFrame(np.unique(Candidate_qualifications[\"candidate_id\"]),columns=[\"candidate_id\"]),result,how=\"left\",on=\"candidate_id\")\r\n result=result.fillna(0)\r\n return result", "def bleu_score(ref_file, hyp_file):\n command = 'perl scripts/multi-bleu.pl ' + ref_file + ' < ' + hyp_file\n c = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n details, error = c.communicate()\n details, error = details.decode('utf-8'), error.decode('utf-8')\n if not details.startswith('BLEU ='):\n raise ValueError('Error in BLEU score computation:\\n%s' % error)\n else:\n BLEU_str = details.split(' ')[2][:-1]\n BLEU = float(BLEU_str)\n return BLEU, details", "def get_risk_profile_bas_scores(self):\n answers = self.current_risk_profile_responses\n if not answers:\n return None\n\n scores = (answers.values('b_score', 'a_score', 's_score').aggregate(b_score=Sum('b_score'),\n a_score=Sum('a_score'),\n s_score=Sum('s_score')))\n\n extents = (\n RiskProfileAnswer.objects.filter(question__group=self.risk_profile_group)\n .values('question').annotate(\n min_b=Min('b_score'), max_b=Max('b_score'),\n min_a=Min('a_score'), max_a=Max('a_score'),\n min_s=Min('s_score'), max_s=Max('s_score'),\n ).aggregate(\n min_b_sum=Sum('min_b'), max_b_sum=Sum('max_b'),\n min_a_sum=Sum('min_a'), max_a_sum=Sum('max_a'),\n min_s_sum=Sum('min_s'), max_s_sum=Sum('max_s'),\n )\n )\n\n max_b = extents['max_b_sum']\n max_a = extents['max_a_sum']\n max_s = extents['max_s_sum']\n return (\n scores['b_score'] / max_b if max_b > 0 else 0,\n scores['a_score'] / max_a if max_a > 0 else 0,\n scores['s_score'] / max_s if max_s > 0 else 0,\n )", "def score(self):\n # loop over aminoacids in protein and calculate how often H and C are surrounded by H and C\n for aminoacid in self.aminoacids:\n if aminoacid.aminoacid_type == \"H\":\n self.stability = self.stability + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number))\n elif aminoacid.aminoacid_type == \"C\":\n self.stability = self.stability + (-5 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"H\", 
aminoacid.aminoacid_number))\n self.stability = self.stability/2\n return int(self.stability)", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def stateQualtityScore(roomba):\n return 0", "def calculate(self):\n\n s_sum = 0\n class_num = len(self.scores)\n \n for i in range(class_num):\n s_sum += self.scores[i]\n\n av = float(s_sum)/class_num\n if av >= 90:\n return 'O'\n elif av >= 80:\n return 'E'\n elif av >= 70:\n return 'A'\n elif av >= 55:\n return 'P'\n elif av >= 40:\n return 'D'\n else:\n return 'T'", "def get_score(self):\r\n if self.is_complete():\r\n score = 1\r\n elif self.is_half_complete():\r\n score = 0.5\r\n else:\r\n score = 0\r\n return {'score': score,\r\n 'total': self.max_score()}", "def worst_score(self):\r\n pass", "def computeFScores(self, targetLabels, actualLabels):\r\n if self.prMeasures is None:\r\n self.prMeasures = self.computePRMeasures(targetLabels, actualLabels)\r\n if self.prMeasures[0] == 0:\r\n return 0\r\n self.f1score = 2 * self.prMeasures[0] * self.prMeasures[1] / (0.0 + self.prMeasures[0] + self.prMeasures[1])\r\n return self.f1score", "def score(A, B):\n assert 10 <= A <= 100 and 10 <= B <= 100 # you can't get too slow or too fast\n trackA = 100 - B\n trackB = 100 - A\n tA = trackA/A\n tB = trackB/B\n return tB - tA", "def muc_scores(self):\n A_card, B_card = self.shape\n V_card = len(self)\n N = self.grand_total\n\n recall = _div(N - V_card, N - A_card)\n precision = _div(N - V_card, N - B_card)\n fscore = hmean(recall, precision)\n return precision, recall, fscore", "def viterbi_score(confusion_networks):\n for confusion_network in confusion_networks:\n prev, score = [-infinity] * len(confusion_network), [-infinity] + [0.0] * len(confusion_network)\n for t in range(0, len(confusion_network)): # t: words in the sentence (\"bfs\")\n prev, score = score, prev\n for j in range(0, len(confusion_network[t])): # Iterates deep-first in a CN position (\"dfs\")\n score[j] = max([prev[i] +\n confusion_network[i][j][2]\n for i in range(0, len(confusion_network[t]))])\n return max([score[i] for i in range(1, len(confusion_network[t]))])", "def __calculateNormalizedScores(self):\n year_scores = {0 : []}\n for venue in self.venue_scores:\n v_scores = []\n for year in self.venue_scores[venue]:\n v_scores.append(self.venue_scores[venue][year])\n if year not in year_scores:\n year_scores[year] = []\n year_scores[year].append(self.venue_scores[venue][year])\n x_year = np.average(np.array(v_scores))\n self.venue_scores[venue][0] = x_year\n year_scores[0].append(x_year)\n \n ##for standardization\n #year_metrics = {x : (np.average(np.array(year_scores[x])), np.std(np.array(year_scores[x]))) for x in year_scores}\n ##for normalization\n year_metrics = {x: (max(year_scores[x]), min(year_scores[x])) for x in year_scores}\n \n #print year_metrics\n \n for venue in self.venue_scores:\n self.normalized_scores[venue] = dict()\n for year in self.venue_scores[venue]:\n #self.standard_scores[venue][year] = round((self.venue_scores[venue][year] - year_metrics[year][0]) / year_metrics[year][1],5)\n #self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1]) / (year_metrics[year][0] - year_metrics[year][1]) + eps\n self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1] + self.epsilon) / (year_metrics[year][0] - year_metrics[year][1] + 
self.epsilon)", "def NormalizePhenotypeScore(score, max_score):\n return float(score) / float(max_score) * 100", "def calculate_bleu(output_lns, refs_lns):\n return round(corpus_bleu(output_lns, [refs_lns]).score, 4)", "def question2():\n \n # load sequences and scoring matrix\n score_matrix = read_scoring_matrix(PAM50_URL)\n human_seq = \"HSGVNQLGGVFVNGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATPEVVSKIAQYKRECPSIFAWEIRDRLLSEGVCTNDNIPSVSSINRVLRNLASEKQQ\"\n frfly_seq = \"HSGVNQLGGVFVGGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATAEVVSKISQYKRECPSIFAWEIRDRLLQENVCTNDNIPSVSSINRVLRNLAAQKEQQ\"\n consensus_pax = read_protein(CONSENSUS_PAX_URL)\n \n # compute human and fruitfly global alignment matrix with consensus pax\n human_align_matrix = student.compute_alignment_matrix(human_seq, consensus_pax, score_matrix, True)\n frfly_align_matrix = student.compute_alignment_matrix(frfly_seq, consensus_pax, score_matrix, True)\n \n # compute human and fruitfly global alignment sequences\n score_human, human_align, consensus_align = student.compute_global_alignment(human_seq, consensus_pax, \n score_matrix, human_align_matrix)\n score_fly, frfly_align, consensus_align_2 = student.compute_global_alignment(frfly_seq, consensus_pax,\n score_matrix, frfly_align_matrix)\n \n # compute percentages match for human and fruitfly\n human_count = 0.0\n for index in range(len(human_align)):\n if human_align[index] == consensus_align[index]:\n human_count += 1\n \n frfly_count = 0.0\n for index in range(len(frfly_align)):\n if frfly_align[index] == consensus_align_2[index]:\n frfly_count += 1\n \n print \"% Human: \" + str(human_count / len(human_align) * 100)\n print \"Hmn: \" + human_align\n print \"PAX: \" + consensus_align\n \n print \"\"\n \n print \"% FrFly: \" + str(frfly_count / len(frfly_align) * 100)\n print \"Fly: \" + frfly_align\n print \"PAX: \" + consensus_align_2", "def define_score(self, votes_string):\n\t\t#2*REW + colleagues + post-doctorate associate + 2* JBW\n\t\tvotes = [int(x) for x in votes_string] \n\t\tweights = [2,1,1,2]\n\t\tscore = 0\n\t\tfor i in range(0, 4):\n\t\t\tif votes[i] >= 0 and votes[i] <= 2:\n\t\t\t\tscore += votes[i]*weights[i]\n\t\treturn score", "def dice_score(ground_truth, prediction):\r\n\r\n # Normalize\r\n prediction /= np.amax(prediction)\r\n ground_truth /= np.amax(ground_truth)\r\n\r\n true_positive_mask = np.logical_and(ground_truth==1, prediction==1)\r\n false_positive_mask = np.logical_and(ground_truth==0, prediction==1)\r\n false_negative_mask = np.logical_and(ground_truth==1, prediction==0)\r\n\r\n TP = np.count_nonzero(true_positive_mask)\r\n FP = np.count_nonzero(false_positive_mask)\r\n FN = np.count_nonzero(false_negative_mask)\r\n\r\n DSC = 2*TP / (2*TP + FP + FN)\r\n\r\n return DSC", "def calculate_indicator_score(self, risk_from_feed):\n dbot_score = 0\n risk_from_feed = int(risk_from_feed)\n if risk_from_feed >= self.threshold or risk_from_feed >= 65:\n dbot_score = 3\n elif risk_from_feed >= 5:\n dbot_score = 2\n\n return dbot_score", "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def cal_ResBeam_Stats(infile, header_bmaj, header_bmin):\n\n beamlog_file = np.loadtxt(infile)\n bmaj = beamlog_file[:,1]\n bmin = 
beamlog_file[:,2]\n ind_nonzero_bmaj = np.nonzero(bmaj) # finding array indices of nonzero values\n ind_nonzero_bmin = np.nonzero(bmin)\n total_nbmaj = np.count_nonzero(bmaj) # count total number of bmaj non zero occurance\n total_nbmin = np.count_nonzero(bmin)\n bmaj_variance = (np.sum((bmaj[ind_nonzero_bmaj]-header_bmaj)**2.0))/total_nbmaj # using header beam value as mean \n bmin_variance = (np.sum((bmin[ind_nonzero_bmin]-header_bmin)**2.0))/total_nbmin\n bmaj_stdev = np.sqrt(bmaj_variance)\n bmin_stdev = np.sqrt(bmin_variance)\n beam_threshold = round((((header_bmaj + bmaj_stdev) * (header_bmin + bmin_stdev))/ (header_bmaj*header_bmin))-1.0, 4)\n bmaj_max = np.max(bmaj[ind_nonzero_bmaj])\n bmaj_min = np.min(bmaj[ind_nonzero_bmaj])\n bmin_max = np.max(bmin[ind_nonzero_bmin])\n bmin_min = np.min(bmin[ind_nonzero_bmin])\n max_ratio_beam_area = (bmaj_max*bmin_max)/(header_bmaj*header_bmin) # measured beam area / header beam area\n min_ratio_beam_area = (bmaj_min*bmin_min)/(header_bmaj*header_bmin)\n\n return bmaj_stdev, bmin_stdev, beam_threshold, max_ratio_beam_area, min_ratio_beam_area", "def get_estimated_score(match_data: dict) -> float:\n \n auto_high = {match_data['auto_HighClose']: match_data['auto_conInnerClose'],\n match_data['auto_HighFrontCP']: match_data['auto_conInnerFrontCP'],\n match_data['auto_HighLine']: match_data['auto_conInnerLine']\n }\n auto_low = match_data['auto_Low']\n auto_line = match_data['auto_leftSectorLine']\n \n tele_high = {match_data['tele_HighClose']: match_data['tele_conInnerClose'],\n match_data['tele_HighFrontCP']: match_data['tele_conInnerFrontCP'],\n match_data['tele_HighLine']: match_data['tele_conInnerLine'],\n match_data['tele_HighBackCP']: match_data['tele_conInnerBackCP']\n }\n tele_low = match_data['tele_Low']\n climbed = match_data['tele_Climbed']\n parked = match_data['tele_UnderSG']\n \n score = 0\n \n # Gives autonomous points\n for x in auto_high:\n score += (4.3, 4.8)[auto_high[x]] * x\n score += auto_low * 2\n if auto_line: score += 5\n \n # Gives teleop points\n for x in tele_high:\n score += (2.15, 2.4)[tele_high[x]] * x\n score += tele_low\n \n # Gives endgame points\n if climbed: score += 25\n if parked: score += 5\n \n return score", "def get_score(self, solution: np.array) -> float:\n pass", "def arsenalResults(dat):\n arsScore = int(dat[0])\n othScore = int(dat[2])\n if arsScore > othScore:\n res = 1\n elif arsScore == othScore:\n res = 2\n else:\n res = 0\n return res", "def best_score(list_bb):\n\n # Compute the number of predicted boxes\n n = len(list_bb)\n\n # if there are more than 0 predicted boxes, search for the 2 boxes\n if n != 0:\n tab_score = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n score_val = score(list_bb[i], list_bb[j])\n tab_score[i, j] = score_val\n\n # Find the maximum\n amax = np.unravel_index(tab_score.argmax(), tab_score.shape)\n\n return union(list_bb[amax[0]], list_bb[amax[1]])\n else:\n return []", "def calculate_finalscore(self):\n\n if self.count!=0:\n print(self.count)\n print(self.badGuess)\n self.finalScore=(self.total/self.count)- ((self.total/self.count)*(10*self.badGuess)/100)\n\n\n else:\n self.finalScore=self.total", "def supply_score(pickups, pickupsfromcarepackages):\n\n\t# get the total number for each supply category\n\tAttachment = pickups[\"Attachment\"] if \"Attachment\" in pickups else 0\n\tUse = pickups[\"Use\"] if \"Use\" in pickups else 0\n\tAmmunition = pickups[\"Ammunition\"] if \"Ammunition\" in pickups else 0\n\tEquipment = pickups[\"Equipment\"] if 
\"Equipment\" in pickups else 0\n\tWeapon = pickups[\"Weapon\"] if \"Weapon\" in pickups else 0\n\n\t# calculate care package score\n\tif pickupsfromcarepackages > 0:\n\t\tcare_package_score = 100\n\telse:\n\t\tcare_package_score = 0\n\n\t# calculate attachment score\n\tif Attachment <= 5:\n\t\tattachment_score = 50\n\telif Attachment <= 9:\n\t\tattachment_score = 75\n\telse:\n\t\tattachment_score = 100\n\n\t# calculate use score\n\tif Use <= 5:\n\t\tuse_score = 70\n\telif Use <= 10:\n\t\tuse_score = 85\n\telse:\n\t\tuse_score = 100\n\n\t# calculate equipment score\n\tif Equipment <= 5:\n\t\tequipment_score = 75\n\telif Equipment <= 10:\n\t\tequipment_score = 90\n\telse:\n\t\tequipment_score = 100\n\n\t# calculate weapon score\n\tif Weapon <= 1:\n\t\tweapon_score = 75\n\telif Weapon == 2:\n\t\tweapon_score = 90\n\telse:\n\t\tweapon_score = 100\n\n\t# calculate ammunition score\n\tif Ammunition <= 5:\n\t\tammunition_score = 50\n\telif Ammunition <= 10:\n\t\tammunition_score = 75\n\telif Ammunition <= 14:\n\t\tammunition_score = 90\n\telse:\n\t\tammunition_score = 100\n\n\tsupplies_score = (equipment_score + use_score + weapon_score + ammunition_score) * 0.225 + attachment_score * 0.1\n\tsupply_score = int(supplies_score * 0.8 + care_package_score * 0.2)\n\n\treturn supply_score", "def compute_god_score():\n\n survivals_count = 0\n for _ in range(PARALLEL_UNIVERSES_COUNT):\n best_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL)\n for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)):\n treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL)\n if treated_survival > best_survival:\n best_survival = treated_survival\n if random.uniform(0, 1) <= best_survival:\n survivals_count += 1\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def bvcs():\r\n a = float(input(\"Please Enter Total Stockholders Equity Value: \"))\r\n b = float(input(\"Please Enter Preferred Equity Value: \"))\r\n c = float(input(\"Please Enter Number of Shares of Common Stock Outstanding: \"))\r\n d = (float(a)-float(b))/float(c)\r\n print \">> Your Book Value Per Share of Common Stock is\",round(d,2)", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def evaluate_bm(all_metrics):\n f_gt, n_gt, n_st = 0, 0, 0\n nbox_gt, nbox_st = 0, 0\n c, g, fp, missed, ids = 0, 0, 0, 0, 0\n IDTP, IDFP, IDFN = 0, 0, 0\n MT, ML, PT, FRA = 0, 0, 0, 0\n overlap_sum = 0\n for i in range(len(all_metrics)):\n nbox_gt += all_metrics[i].idmetrics.nbox_gt\n nbox_st += all_metrics[i].idmetrics.nbox_st\n\n # Total ID Measures\n IDTP += all_metrics[i].idmetrics.IDTP\n IDFP += all_metrics[i].idmetrics.IDFP\n IDFN += all_metrics[i].idmetrics.IDFN\n\n # Total ID Measures\n MT += all_metrics[i].MT\n ML += all_metrics[i].ML\n PT += all_metrics[i].PT\n FRA += all_metrics[i].FRA\n f_gt += all_metrics[i].f_gt\n n_gt += all_metrics[i].n_gt\n n_st += all_metrics[i].n_st\n c += all_metrics[i].c\n g += all_metrics[i].g\n fp += all_metrics[i].fp\n missed += all_metrics[i].missed\n ids += all_metrics[i].mme\n overlap_sum += sum(sum(all_metrics[i].d))\n\n # IDP = IDTP / (IDTP + IDFP)\n IDP = IDTP / (IDTP + IDFP) * 100\n\n # IDR = IDTP / (IDTP + IDFN)\n IDR = IDTP / (IDTP + IDFN) * 100\n\n # IDF1 = 2 * IDTP / (2 * IDTP + IDFP + IDFN)\n IDF1 = 2 * IDTP / (nbox_gt + nbox_st) * 100\n FAR = fp / f_gt\n MOTP = (overlap_sum / c) * 100\n\n # MOTAL = 1 - (# fp + # fn + #log10(ids)) / # gts\n MOTAL = (1 - (fp + missed 
+ np.log10(ids + 1)) / g) * 100\n\n # MOTA = 1 - (# fp + # fn + # ids) / # gts\n MOTA = (1 - (fp + missed + ids) / g) * 100\n\n # recall = TP / (TP + FN) = # corrected boxes / # gt boxes\n recall = c / g * 100\n\n # precision = TP / (TP + FP) = # corrected boxes / # det boxes\n precision = c / (fp + c) * 100\n metrics = [IDF1, IDP, IDR, recall, precision, FAR, n_gt,\n MT, PT, ML, fp, missed, ids, FRA, MOTA, MOTP, MOTAL]\n return metrics", "def personal_best(scores):\n return max(scores)", "def calculate_cci(hunterlab):\n return 1000 * (hunterlab[1]) / (hunterlab[0] * hunterlab[2])", "def workout_score(a, scal):\r\n # Check if score is empty\r\n if check_if_empty(a):\r\n return np.nan\r\n if scal == 0:\r\n # Some people sign up for Rx then enter scaled scores...\r\n if check_if_scaled(a):\r\n return np.nan\r\n if a.split(\" \")[-1] == 's':\r\n return extract_score(a.replace(\" - s\",\"\"))\r\n return extract_score(a)", "def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1", "def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1", "def weighted_majority_vote(c_pred,m_pred,f_pred,acc_c,acc_m,acc_f, dataset):\n c,m,f = np.argmax(c_pred),np.argmax(m_pred),np.argmax(f_pred)\n coarse = np.zeros(2)\n middle = np.zeros(4)\n fine = np.zeros(10)\n\n if dataset == 'cifar10':\n middle = np.zeros(5)\n coarse[c] = 1\n middle[m] = 1\n fine[f] = 1\n res = np.zeros(10)\n w1 = np.log(acc_c/(1.-acc_c))\n w2 = np.log(acc_m/(1.-acc_m))\n w3 = np.log(acc_f/(1.-acc_f))\n if dataset == 'cifar10':\n for i in range(10):\n if i <2:\n res[i] = w1*coarse[0] + w2*middle[0] + w3*fine[i]\n elif 2<=i <4:\n res[i] = w1*coarse[0] + w2*middle[1] + w3*fine[i]\n elif 4 <=i<6:\n res[i] = w1*coarse[1] + w2*middle[2] + w3*fine[i]\n elif 6<=i<8:\n res[i] = w1*coarse[1] + w2*middle[3] + w3*fine[i]\n else:\n res[i] = w1*coarse[1] + w2*middle[4] + w3*fine[i]\n else :\n for i in range(10):\n if i <3:\n res[i] = w1*coarse[0] + w2*middle[0] + w3*fine[i]\n elif 3<=i <5:\n res[i] = w1*coarse[0] + w2*middle[1] + w3*fine[i]\n elif 5 <=i<8:\n res[i] = w1*coarse[1] + w2*middle[2] + w3*fine[i]\n else:\n res[i] = w1*coarse[1] + w2*middle[3] + w3*fine[i]\n index = np.argmax(res)\n return(index)", "def __calculate_gender_diversity_score(project: dict, student: dict) -> int:\n # project_name = project[\"fields\"][PROJECT_NAME_FIELD]\n # student_name = student[\"fields\"][SURVEY_STUDENT_NAME_FIELD][0]\n\n # print(\"Calculating gender pairing score for: Project({}) - Student({})\".format(project_name, student_name))\n\n # Get the gender specified by the student\n student_gender = student[\"fields\"].get(SURVEY_GENDER_FIELD, None)\n if not student_gender:\n # The student didn't provide a gender, so we can't calculate a score\n return 0\n\n # Get the list of current assignments for the project team\n team_assignments = __get_team_assignments(project)\n\n # This list will hold the list of genders on the team\n team_gender_values = []\n for assignment in team_assignments:\n assigned_student_gender = assignment.student[\"fields\"].get(SURVEY_GENDER_FIELD, None)\n\n if assigned_student_gender:\n team_gender_values.append(assigned_student_gender)\n\n # ================================================================================================================\n # Get the count genders for the already assigned 
students\n gender_counter = __get_gender_counter()\n gender_counter.update(team_gender_values)\n\n # Get the count of the particular gender that matches the student\n matching_gender_count = gender_counter.get(student_gender)\n\n if matching_gender_count == 0:\n # This is good, as it will make the team more diverse\n return SURVEY_GENDER_BASE_WEIGHT\n elif matching_gender_count == 1:\n # This is better, as it will pair students with like genders\n return SURVEY_GENDER_BASE_WEIGHT * 2\n else:\n # There are already at least 2 student with this gender identity, so we won't\n # prefer this\n return 0", "def solution(self):\n return [(\"the\", 1561900)] * 100", "def get_score(self, red_score, blue_score):\n if red_score < blue_score:\n return 0\n elif red_score > blue_score:\n return 1\n else:\n return 0.5", "def calculateStatisticalSignificance():\n ##tau HCS pearson\n ml_model_perf = pickle.load(open(\"pickles/ml_model_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_perf.pkl\", \"rb\"))\n y = np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n z, p = calculateZScoreAndPValue(m1=y[0], s1=stds[0], n1=17280, m2=y[1], s2=stds[1], n2=17280)\n print(\"stats for HCS pearson, ML vs Null YFP: z: {}, p: {}\".format(z, p))\n z, p = calculateZScoreAndPValue(m1=y[0], s1=stds[0], n1=17280, m2=y[2], s2=stds[2], n2=17280)\n print(\"stats for HCS pearson, ML vs Null DAPI: z: {}, p: {}\".format(z, p))\n \n ##tau HCS MSE\n ml_model_perf = pickle.load(open(\"pickles/ml_model_mse_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_mse_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_mse_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n z, p = calculateZScoreAndPValue(m1=y[1], s1=stds[1], n1=17280, m2=y[0], s2=stds[0], n2=17280)\n print(\"stats for HCS MSE, ML vs Null YFP: z: {}, p: {}\".format(z, p))\n z, p = calculateZScoreAndPValue(m1=y[2], s1=stds[2], n1=17280, m2=y[0], s2=stds[0], n2=17280)\n print(\"stats for HCS MSE, ML vs Null DAPI: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma ablated pearon\n ##this one is a bit more involved because we have individual means and STDs over a 3-fold cross-val\n ##we have the following for the ablated ML model (sample size, avg pearson, std), one for each fold:\n # (108330 0.7498484453029202 0.12794946936625312)\n # (108330 0.7507672277328549 0.12978897185198424) \n # (108330 0.7512250395547646 0.12858723725044444)\n ##combining to one sample we have mean = .7506, std=.1288\n ##and the following for the Null Model\n #(108330 0.3951239419846807 0.13861514301358197)\n #(108330 0.39522112186984787 0.1387019314192389)\n #(108330 0.3956142180066648 0.13832544923711507)\n ##combining this into one sample, we have: mean = 0.3953, std = .1385\n z, p = calculateZScoreAndPValue(m1=.7506, s1=.1288, n1=108330*3, m2=.3953, s2=.1385, n2=108330*3)\n print(\"stats for osteosarcoma ablated pearson, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma ablated MSE\n ##ML model performance:\n # (108330 0.5003031 0.25589895)\n # (108330 0.4984656 0.25957793)\n # (108330 0.49754992 0.2571745)\n ##combining to one sample we have mean = 0.4988 , std= 
.2576\n ##Null Model performance:\n # (108330 1.209752 0.2772303)\n # (108330 1.2095579 0.27740386)\n # (108330 1.2087716 0.27665088)\n ##combining to one sample we have mean = 1.2094 , std= 0.2771\n z, p = calculateZScoreAndPValue(m1=1.2094, s1=.2771, n1=108330*3, m2=.4988, s2=.2576, n2=108330*3)\n print(\"stats for osteosarcoma ablated MSE, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma raw pearson \n ##ML model performance:\n #(108330 0.8487535502148598, 0.0750789260880985)\n #(108330 0.8482422038817274, 0.0749674444367002)\n # (108330 0.8500693686258434, 0.07491226209365953)\n ##combining to one sample we have mean = .849 , std= 0.075\n ##Null model performance:\n #(108330 0.44372635525546694, 0.11585072713296693)\n #(108330 0.4440357996615424, 0.11573081667714848)\n # (108330 0.4443288449364213, 0.11528081384708891)\n ##combining to one sample we have mean = 0.444 , std= 0.1156\n z, p = calculateZScoreAndPValue(m1=.849, s1=0.075, n1=108330*3, m2=0.444, s2=0.1156, n2=108330*3)\n print(\"stats for osteosarcoma raw pearson, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma raw MSE\n ##ML model performance:\n #(108330 0.3024929, 0.15015785)\n #(108330 0.3035156, 0.1499349)\n # (108330 0.29986125, 0.14982451)\n ##combining to one sample we have mean = 0.302 , std= 0.15\n ##Null model performance\n # (108330 1.1125473, 0.23170146)\n # (108330 1.1119285, 0.23146166)\n # (108330 1.1113423, 0.23056163)\n ##combining to one sample we have mean = 1.1119 , std= 0.2312\n z, p = calculateZScoreAndPValue(m1=1.1119, s1=0.2312, n1=108330*3, m2=0.302, s2=0.15, n2=108330*3)\n print(\"stats for osteosarcoma raw MSE, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##comparing ablated to nonablated pearson\n z, p = calculateZScoreAndPValue(m1=0.849, s1=0.075, n1=108330*3, m2=0.7506, s2=0.1288, n2=108330*3)\n print(\"stats for comparing ablated to non-ablated pearson: z: {}, p: {}\".format(z, p))\n\n ##comparing ablated to nonablated MSE\n z, p = calculateZScoreAndPValue(m1=.4988, s1=.2576, n1=108330*3, m2=0.302, s2=0.15, n2=108330*3)\n print(\"stats for comparing ablated to non-ablated MSE: z: {}, p: {}\".format(z, p))", "def kaggle_metric(predictions, exact_values):\n norm = 70.*len(predictions)\n \n score = 0.\n for p,e in zip(predictions,exact_values):\n score += N.sum((heaviside(p)-heaviside(e))**2)\n\n return score/norm", "def getScores(self, w1, w2, w3):\r\n Fw = 2.26 * 3\r\n score = round((float(w1) * float(w2) * float(w3)) ** Fw, 6) # Keep six decimal places\r\n return score", "def classify(self, data):\n score_mappings = self.score(data)\n\n #update this logic to return max or the first thing in sorted list\n\n # score_mappings[\"2\"] = 0.009015777610818933 \n\n # print(score_mappings)\n\n max_value = score_mappings[max(score_mappings, key=score_mappings.get)]\n\n # print(max_value)\n\n score_mappings = dict(filter(lambda x: x[1] == max_value, score_mappings.items()))\n\n # print(score_mappings)\n\n return sorted(score_mappings)[0]", "def get_r2_score(ground_truth, predicted):\n residual = np.sum(np.square(np.subtract(ground_truth, predicted)))\n print(residual)\n total = np.sum(np.square(np.subtract(ground_truth, np.mean(ground_truth))))\n print(total)\n return np.subtract(1.0, np.divide(residual, (total + 0.00000000001)))", "def biodiversity_graph(self, graph=\"percentage\"):\n plt.ioff()\n fig, ax = plt.subplots()\n index = np.arange(7)\n bar_width = 0.3\n offset = bar_width / 2\n if graph == \"score\":\n if self.PotTax_reference is not None:\n # this 
requires a deepcopy, otherwise the xticks updates also\n # updates the PotTax_percentage indexes.\n xticks = deepcopy(self.PotTax_reference.index.values)\n for i, item in enumerate(xticks):\n if item == \"DragonDamselflies\":\n xticks[i] = \"Dragon &\\nDamselflies\"\n if item == \"HigherPlants\":\n xticks[i] = \"Higher\\nPlants\"\n #label = (\"reference: \" +\n # str(round(self.PotTax_reference.sum().TFI, 2)))\n label = \"initial board\"\n reference = ax.bar(\n index-offset, self.PotTax_reference.values.flatten(),\n bar_width, label=label, tick_label=xticks)\n if self.PotTax_intervention is not None:\n #label = (\"intervention: \" +\n # str(round(self.PotTax_intervention.sum().TFI, 2)))\n label = \"current board\"\n intervention = ax.bar(\n index+offset, self.PotTax_intervention.values.flatten(),\n bar_width, label=label, tick_label=xticks)\n ax.set_title(\"Biodiversity scores\")\n ax.set_ylabel(\"total value\")\n legend = ax.legend(loc='best', facecolor='black', edgecolor='w',\n fancybox=True, framealpha=0.5, fontsize=\"large\")\n plt.setp(legend.get_texts(), color='w')\n else:\n if self.PotTax_percentage is not None:\n # this requires a deepcopy, otherwise the xticks updates also\n # updates the PotTax_percentage indexes.\n xticks = deepcopy(self.PotTax_percentage.index.values)\n for i, item in enumerate(xticks):\n if item == \"DragonDamselflies\":\n xticks[i] = \"Dragon &\\nDamselflies\"\n if item == \"HigherPlants\":\n xticks[i] = \"Higher\\nPlants\"\n data = self.PotTax_percentage.values.flatten()\n percentage = ax.bar(\n index, data, bar_width, label=\"percentage\",\n tick_label=xticks)\n ax.set_title(\"Biodiversity change\")\n ax.set_ylabel(\"change (%)\")\n # the xticks rotation could probably be handled better.\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n tick.set_fontsize(14)\n # set the color of all figure borders, axis ticks and text to white.\n ax.spines['bottom'].set_color('w')\n ax.spines['top'].set_color('w') \n ax.spines['right'].set_color('w')\n ax.spines['left'].set_color('w')\n ax.tick_params(axis='x', colors='w')\n ax.tick_params(axis='y', colors='w')\n ax.yaxis.label.set_color('w')\n ax.yaxis.label.set_fontsize(14)\n ax.xaxis.label.set_color('w')\n ax.xaxis.label.set_fontsize(14)\n ax.title.set_fontsize(20)\n ax.title.set_color('w')\n plt.tight_layout()\n if graph == \"score\":\n plt.savefig(os.path.join(self.web_dir, \"biodiversity_score1.png\"),\n edgecolor='w',transparent=True)\n else:\n plt.savefig(os.path.join(self.web_dir, \"biodiversity_score2.png\"),\n edgecolor='w',transparent=True)\n plt.close(fig)\n return", "def calc_par_clf(irr_values):\n par_values = [calculate_par(round(float(v))) for v in irr_values]\n total_count = len(par_values)\n count = [0, 0, 0]\n for v in par_values:\n if v <= 3.13:\n count[0] += 1\n elif v <= 5.48:\n count[1] += 1\n else:\n count[2] += 1\n\n # convert to %\n count = [round(c * 100 / total_count, 2) for c in count]\n\n return par_values, {'low': count[0], 'medium': count[1], 'high': count[2]}", "def test_small_round_numbers_99_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 99\r\n expected_result = 6.94700\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)", "def cal_hit_gbratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n #print({d['user'].iloc[0]:d['ratings'].to_list() for i,d in top_k.groupby('user')})\n score = 0.0\n # golden items hit in the top_K 
items\n score_1 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)]) for i,d in top_k.groupby('user')}\n score_2 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)]) for i,d in top_k.groupby('user')} \n score_ratio = [(score_1[d]-score_2[d]/self._test_ratings[d]) if self._test_ratings[d]!=0 else 0 for d in self._test_ratings.keys()]\n\n #print(np.mean(score_ratio))\n #print(score_1)\n #score = score_1 + score_2\n return np.mean(score_ratio)", "def davies_bouldin_score(X, labels):\n\tX, labels = sklearn.utils.check_X_y(X, labels)\n\tle = sklearn.preprocessing.LabelEncoder()\n\tlabels = le.fit_transform(labels)\n\tn_samples, _ = X.shape\n\tn_labels = len(le.classes_)\n\tif not 1 < n_labels < n_samples:\n\t\traise ValueError(\"Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)\" % n_labels)\n\n\tintra_dists = np.zeros(n_labels)\n\tcentroids = np.zeros((n_labels, len(X[0])), dtype=np.float)\n\tfor k in range(n_labels):\n\t\tcluster_k = sklearn.utils.safe_indexing(X, labels == k)\n\t\tcentroid = cluster_k.mean(axis=0)\n\t\tcentroids[k] = centroid\n\t\tintra_dists[k] = np.average(sklearn.metrics.pairwise.pairwise_distances(cluster_k, [centroid]))\n\n\tcentroid_distances = sklearn.metrics.pairwise.pairwise_distances(centroids)\n\n\n\tif np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):\n\t\treturn 0.0\n\n\tscore = (intra_dists[:, None] + intra_dists) / centroid_distances\n\tscore[score == np.inf] = np.nan\n\n\treturn np.mean(np.nanmax(score, axis=1))", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def dice_score(binary_image, binary_control):\n # figure_of_control(binary_control, 'Optimal given threshold')\n match = creation_of_match_array(binary_image, binary_control)\n # figure_of_control(match, 'deviation of optimal threshold and otsu')\n true = sum(sum(match))\n false = np.size(match) - true\n score = 2 * true / (2 * true + false)\n # print(\"True hits: \", true)\n # print(\"False hits: \", false)\n # print('Dice score: ', score)\n return score", "def BHS_standard(err):\n \n leq5 = 0\n leq10 = 0\n leq15 = 0\n \n for i in range(len(err)):\n \n if(abs(err[i])<=5):\n leq5 += 1\n leq10 += 1\n leq15 += 1\n \n elif(abs(err[i])<=10): \n leq10 += 1\n leq15 += 1\n \n elif(abs(err[i])<=15): \n leq15 += 1\n \n \n \n return (leq5*100.0/len(err), leq10*100.0/len(err), leq15*100.0/len(err))", "def _computeOverallScore(scalars):\n scores = {}\n variables = [v for v in scalars.variables.keys() if \"Score\" in v and \"Overall\" not in v]\n for region in self.regions:\n overall_score = 0.\n sum_of_weights = 0.\n for v in variables:\n if region not in v: continue\n score = v.replace(region,\"\").strip()\n weight = 1.\n if self.weight.has_key(score): weight = self.weight[score]\n overall_score += weight*scalars.variables[v][...]\n sum_of_weights += weight\n overall_score /= max(sum_of_weights,1e-12)\n scores[\"Overall Score %s\" % region] = overall_score\n return scores", "def gbetter(res1,res2):\n \n better = -1 # default unless proven wrong \n dbic = 0 # same to start with\n \n rms1,noise1,par1 = res1.get('rms'),res1.get('noise'),res1.get('par')\n rms2,noise2,par2 = res2.get('rms'),res2.get('noise'),res2.get('par')\n \n # Calculate Bayesian Information Criterion (BIC)\n # lower BICs are better\n bic1 = utils.bayesinfocrit(res1)\n bic2 = utils.bayesinfocrit(res2)\n dbic = bic1-bic2\n\n # Solution 1 is better\n if dbic <= 0:\n 
better = 0\n # Solution 2 is better\n if dbic > 0 :\n better = 1\n\n return better,dbic\n\n # ---------- OLD CODE, NOT USED ANYMORE ----------\n \n # In case either one is -1 (bad)\n if par1 is not None and par2 is not None:\n if (rms1 == -1) and (rms2 != -1): \n better = 1\n if (rms1 != -1) and (rms2 == -1): \n better = 0 \n if (rms1 == -1) and (rms2 == -1): \n better = -1 \n if (rms1 == -1) or (rms2 == -1): \n return better,dbic\n if (len(par1) < 3) and (len(par2) >= 3): \n better = 1 \n if (len(par2) < 3) and (len(par1) >= 3): \n better = 0 \n if (len(par1) < 3) or (len(par2) < 3): \n return better,dbic\n\n # One is bad, second is better\n if par1 is None:\n return -1,dbic\n \n # Two is bad, first is better \n if par2 is None:\n return -1,dbic\n \n drms1 = rms1-noise1 \n drms2 = rms2-noise2 \n n1 = len(par1)/3 \n n2 = len(par2)/3 \n \n # Clear cut, rms better, n equal or less \n if (drms1 < drms2) and (n1 <= n2): \n better = 0 \n if (drms1 > drms2) and (n1 >= n2): \n better = 1 \n \n # RMS same, N different \n if (drms1 == drms2) and (n1 <= n2): \n better = 0 \n if (drms1 == drms2) and (n1 > n2): \n better = 1 \n \n # Mixed bag, lower RMS but higher N\n if (drms1 < drms2) and (n1 > n2): \n ddrms = drms2-drms1 \n rdrms = ddrms/drms2 # ratio compared to worse one \n dn = n1-n2 \n \n better = 1 # default \n if (dn == 1) and (rdrms > 0.2) : \n better = 0 \n if (dn == 2) and (rdrms > 0.5) : \n better = 0 \n if (dn == 3) and (rdrms > 1.0) : \n better = 0 \n if (dn >= 4) and (rdrms > 2.0) : \n better = 0 \n \n if (drms2 < drms1) and (n2 > n1): \n ddrms = drms1-drms2 \n rdrms = ddrms/drms1 # ratio compared to worse one \n dn = n2-n1 \n \n better = 0 # default \n if (dn == 1) and (rdrms > 0.2) : \n better = 1 \n if (dn == 2) and (rdrms > 0.5) : \n better = 1 \n if (dn == 3) and (rdrms > 1.0) : \n better = 1 \n if (dn >= 4) and (rdrms > 2.0) : \n better = 1 \n \n return better,dbic", "def calculate_risk_tol(*args):\n global total_score\n risk_tol_start = 0.0\n\n for risk_per_pg in risk_tol_per_qs.iterkeys():\n try:\n risk_tol_start = risk_tol_start + risk_tol_per_qs[risk_per_pg][-1] # this is the last item in the list of each information in the page\n except IndexError:\n pass\n total_score = risk_tol_start", "def test_calculate_class_2_individuals_best_response_markov_example_2():\n assert round(\n calculate_class_2_individuals_best_response(\n lambda_2=6,\n lambda_1_1=2,\n lambda_1_2=3,\n mu_1=5,\n mu_2=2,\n num_of_servers_1=3,\n num_of_servers_2=4,\n threshold_1=7,\n threshold_2=9,\n system_capacity_1=10,\n system_capacity_2=10,\n buffer_capacity_1=10,\n buffer_capacity_2=10,\n ),\n NUMBER_OF_DIGITS_TO_ROUND,\n ) == round(0.8224704160104401, NUMBER_OF_DIGITS_TO_ROUND)", "def g_score(self):\n _, _, I_CK = self._entropies()\n return 2.0 * I_CK", "def solution(self):\n return {\n \"count\": 0.,\n \"mean\": 0.,\n \"stdev\": 0.,\n \"5%\": 0.,\n \"25%\": 0.,\n \"median\": 0.,\n \"75%\": 0.,\n \"95%\": 0.,\n }", "def solution(self):\n return {\n \"count\": 0.,\n \"mean\": 0.,\n \"stdev\": 0.,\n \"5%\": 0.,\n \"25%\": 0.,\n \"median\": 0.,\n \"75%\": 0.,\n \"95%\": 0.,\n }", "def tpr95(in_softmax_scores, out_softmax_scores):\n # 1. Init result & counter\n result, counter = 0.0, 0\n # 2. 
Traverse delta\n # (1) Get delta_list\n reversed_in_softmax_scores = np.sort(in_softmax_scores)[::-1]\n upper_num = int(0.9505 * len(reversed_in_softmax_scores))\n lower_num = int(0.9495 * len(reversed_in_softmax_scores))\n if upper_num == lower_num:\n delta_list = [reversed_in_softmax_scores[upper_num]]\n else:\n delta_list = reversed_in_softmax_scores[lower_num:upper_num]\n # (2) Traversing\n for delta in delta_list:\n fpr = np.sum(out_softmax_scores >= delta) * 1.0 / len(out_softmax_scores)\n result += fpr\n counter += 1\n # 3. Get result\n result = result / counter\n # Return\n return result", "def goodnessScore(roombas, obstacles, targeting, C1=3, C2=1, C3=1, C4=1, C5=2):\n\n def headingScore(roomba):\n # print(roomba.visible_location.pose.pose.orientation)\n heading = orientationToHeading(roomba.visible_location.pose.pose.orientation)\n\n return abs(np.sin(heading))\n\n def positionScore(roomba):\n # the closer to the center, the lower the xScore, from 0 to 1\n posX = roomba.visible_location.pose.pose.position.x\n posY = roomba.visible_location.pose.pose.position.y\n if(abs(posX) > 9.95 or abs(posY) > 9.95):\n return -10000\n xScore = abs(posX) / 10\n # the closer to the green zone, the higher the yScore, from 0 to 1\n yScore = posY / 20 + 0.5\n return (xScore + yScore)/2\n\n def distanceFromObstaclesScore(roomba, obstacles):\n \"\"\"\n (-infinity, 0)\n \"\"\"\n\n score = 0\n MIN_OBSTACLE_DISTANCE = 0.5\n for obstacle in obstacles:\n x = roomba.visible_location.pose.pose.position.x - obstacle.visible_location.pose.pose.position.x\n y = roomba.visible_location.pose.pose.position.y - obstacle.visible_location.pose.pose.position.y\n dist = np.sqrt(x ** 2 + y ** 2)\n\n if dist < MIN_OBSTACLE_DISTANCE:\n return -10000\n\n score -= 1 / dist ** 2\n\n return score\n\n def stateQualtityScore(roomba):\n \"\"\"\n How precisely we know the Roombas' state.\n Compare position accuracy to view radius to know if it's possible\n to see the given roomba when drone arrives.\n \"\"\"\n return 0\n\n def sameRoomba(roomba, targeting):\n if(targeting != None and roomba.frame_id == targeting.frame_id):\n return 1\n else:\n return 0\n\n result = []\n\n for i in xrange(0, len(roombas)):\n roomba = roombas[i]\n\n score = C1 * headingScore(roomba) + \\\n C2 * positionScore(roomba) + \\\n C3 * distanceFromObstaclesScore(roomba, obstacles) + \\\n C4 * stateQualtityScore(roomba) + \\\n C5 * sameRoomba(roomba, targeting)\n result.append((roomba, score))\n\n return result", "def score(str_bytes):\n freq_score = sum([character_frequencies.get(chr(letter).lower(), -100) for letter in str_bytes])\n return math.ceil(freq_score * 100) / 100", "def get_DeepSpCas9_score(gRNA_list):\n\turl=\"http://deepcrispr.info/DeepSpCas9/\"\n\tbr = mechanize.Browser()\n\tbr.set_handle_robots(False) # ignore robots\n\tbr.open(url)\n\tbr.select_form(nr=0)\n\tbr[\"ENTER_FASTA\"] = list_to_fasta(gRNA_list)\n\tres = br.submit()\n\toutput = res.read()\n\tres = parse_webpage(output)\n\tflag = False\n\tfor i in gRNA_list:\n\t\tif not i[:20] in res:\n\t\t\tprint (\"gRNA: %s NOT FOUND!\"%(i[:20]))\n\t\t\tprint (\"DeepSpCas9 API error!\")\n\t\t\tflag = True\n\t\t\tres[i[:20]] = 0\n\tif flag:\n\t\tprint (output)\n\t\tprint (res)\n\treturn res", "def score_int( hand ):\n m = matches(hand)\n #print( m )\n #royal_flush -- a special case of straight flush.\n if flush(hand) and straight(hand) and hand[4].rank == 14:\n return 80000 + 100*order(hand[4])\n #straight_flush\n elif flush(hand) and straight(hand):\n return 80000 + 100*order(hand[4])\n 
#four_of_a_kind\n elif len(m) == 2 and m[0].count == 4:\n return 70000 + 100*order(m[0].card)\n #full_house\n elif len(m) == 2 and m[0].count == 3 and m[1].count == 2:\n return 60000 + 100*order(m[0].card) + order(m[1].card)\n #flush\n elif flush(hand):\n return 50000 + 100*order(hand[4])\n #straight\n elif straight(hand):\n return 40000 + 100*order(hand[4])\n #three_of_a_kind\n elif len(m) == 3 and m[0].count == 3:\n return 30000 + 100*order(m[0].card)\n #two_pair\n elif len(m) == 3 and m[0].count == 2 and m[1].count == 2:\n return 20000 + 100*order(m[0].card) + order(m[1].card)\n #one_pair\n elif len(m) == 4 and m[0].count == 2 and m[1].count == 1:\n return 10000 + 100*order(m[0].card) + order(m[1].card)\n # Simple high card. Is this adequate? We'll know if we get ties.\n else:\n return 100*order(hand[4]) # or 100*order(m[0].card)", "def free_bacon(opponent_score):\n # BEGIN PROBLEM 2\n a, b = opponent_score % 10, opponent_score // 10 # separation into digits\n return (max(a, b) + 1)\n # END PROBLEM 2", "def part3c_2():\n xs = \"Werner & Co entered court today . Werner maintained that they were not guilty .\".split()\n N = 10000\n\n submission.computeGibbsProbabilities( englishCRF,\n submission.getCRFBlocks,\n submission.chooseGibbsCRF,\n xs, N )\n grader.requireIsTrue(True)", "def calculate_bleu(output_lns, refs_lns, **kwargs) -> dict:\n return {\"bleu\": round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)}", "def calculate_bleu(output_lns, refs_lns, **kwargs) -> dict:\n return {\"bleu\": round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)}", "def test_get_score():\n\n assert sequence_threshold.get_score([]) == 0\n assert sequence_threshold.get_score(SortedSet()) == 0\n assert sequence_threshold.get_score(list(range(3, 36))) == 3\n assert sequence_threshold.get_score([10, 11, 12, 14, 16, 17]) == 10 + 14 + 16", "def get_dividends_score(self) -> Address:\n return self._dividends_score.get()", "def AmOppCr(_cmp, e87482, e87487, e87492, e87497):\n\n \"\"\"\n This function calculates American Opportunity Credit\n for up to four eligible students\n\n \"\"\"\n\n # Expense should not exceed the cap of $4000.\n if _cmp == 1:\n\n c87482 = max(0., min(e87482, 4000.))\n c87487 = max(0., min(e87487, 4000.))\n c87492 = max(0., min(e87492, 4000.))\n c87497 = max(0., min(e87497, 4000.))\n else:\n c87482, c87487, c87492, c87497 = 0., 0., 0., 0.\n\n # Credit calculated as 100% of the first $2000 expense plus\n # 25% of amount exceeding $2000.\n if max(0, c87482 - 2000) == 0:\n c87483 = c87482\n else:\n c87483 = 2000 + 0.25 * max(0, c87482 - 2000)\n\n if max(0, c87487 - 2000) == 0:\n c87488 = c87487\n else:\n c87488 = 2000 + 0.25 * max(0, c87487 - 2000)\n\n if max(0, c87492 - 2000) == 0:\n c87493 = c87492\n else:\n c87493 = 2000 + 0.25 * max(0, c87492 - 2000)\n\n if max(0, c87497 - 2000) == 0:\n c87498 = c87497\n else:\n c87498 = 2000 + 0.25 * max(0, c87497 - 2000)\n\n # Sum of credits of all four students.\n c87521 = c87483 + c87488 + c87493 + c87498\n\n return (c87482, c87487, c87492, c87497, c87483, c87488, c87493, c87498,\n c87521)", "def classify(x, c, b):\n if x<c-b:\n return 0\n elif x>c+b:\n return 1\n else:\n if b>10**-7:\n return (x-c+b)/2/b\n else:\n return 0.5", "def base_contribute_score():\n return 1", "def BayesPaperStats(maxIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n problemBounds = {\"Bfield\": 
choco.uniform(10, 1300), \"T\": choco.uniform(50, 230), \"Btheta\": choco.uniform(0, 90), \"Etheta\": choco.uniform(0, 90)}\n\n # The target for each algorithm. This was determined by using the values in the literature, so there is clearly some deviation either due to the detuning or computation.\n globalFoM = 1.033\n\n if rank == 0:\n timeList = []\n iterationList = []\n\n # Set up the database for the chocolate optimiser.\n connection = choco.SQLiteConnection(\"sqlite:///bayes_paper_\" + str(rank) + \"_db.db\")\n\n # Define which solver will be used.\n solver = choco.Bayes(connection, problemBounds, utility_function = \"ei\", n_bootstrap = int(np.ceil(maxIters/10)), clear_db = True)\n\n # Clear the database. TODO: To do this?\n connection.clear()\n\n # Start timing.\n startTime = time.time()\n timeElapsed = None\n iterationSuccess = None\n\n # Start optimisation.\n for iteration in range(maxIters):\n\n # Make one suggestion.\n try:\n token, nextParams = solver.next()\n except:\n print(\"Error suggesting a new point. Here are the last set of parameters sampled:\")\n print(str(nextParams))\n print(\"Iteration number: \" + str(iteration))\n continue\n\n # Check what FoM this gives. Go negative as this is a minimisation routine.\n fEval = abs(FitnessPaper(**nextParams))\n\n # Update best FoM.\n if fEval >= globalFoM:\n # The algorithm has managed to surpass or equal the paper value.\n iterationSuccess = iteration\n timeElapsed = time.time() - startTime\n \n if rank == 0:\n iterationList.append(iterationSuccess)\n timeList.append(timeElapsed)\n\n break\n \n # Tell the optimiser about the result.\n solver.update(token, fEval)\n\n # Run complete. Send results to main process. Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(iterationSuccess, dest = 0, tag = 2)\n\n # Wait for all the processes to end.\n comm.Barrier()\n\n if rank == 0:\n # Aggregate the data.\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualIter = None\n individualIter = comm.recv(individualIter, source = process + 1, tag = 2)\n\n if individualIter is not None:\n # Both values must therefore be non-null.\n iterationList.append(individualIter)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgIters = np.average(iterationList)\n try:\n\n fastestTime = np.min(timeList)\n\n except ValueError:\n \n # List is empty.\n fastestTime = float('NaN')\n\n numSuccess = len(iterationList)\n successRate = numSuccess/numRuns\n\n print(\"Bayesian optimisation paper testing complete! Here are the stats:\")\n print(\"Number of successful runs: \" + str(numSuccess) + \" (Success rate of \" + str(successRate) + \")\")\n print(\"Average iterations required for success: \" + str(avgIters))\n print(\"Average time required for success: \" + str(avgRuntime))\n print(\"Fastest convergence time: \" + str(fastestTime))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def test_small_round_numbers_98_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 98\r\n expected_result = 7.67748\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)", "def get_score(fps: float, ssim: float,\n ssim_max: float = 30.,\n rate: float = 10.) 
-> float:\n fps = np.round(fps, 1)\n ssim = np.round(ssim, 3)\n return 0.1 * np.log(fps) / np.log(rate) + min(ssim, ssim_max)", "def score(self):", "def balanced_accuracy(self):\n return 0.5 * (self.sensitivity + self.specificity)", "def calculate_gpa(score):\n if score < 60:\n return 0\n elif 60 <= score < 70:\n return 1\n elif 70 <= score < 80:\n return 2\n elif 80 <= score < 90:\n return 3\n elif score >= 90:\n return 4", "def scores(self):\n\t\tseqLengths = []\n\t\tfor x in self.contigsInfo.keys():\n\t\t\tseq = self.contigsInfo[x]\n\t\t\tseqLengths.append(len(seq))\n\n\t\tseqLengths = sorted(seqLengths)\t\n\t\tmax_length = max(seqLengths)\n\t\tmin_length = min(seqLengths)\n\t\tmean_length = np.mean(seqLengths)\t\n\n\n\t\tmidLength = sum(seqLengths)/2\n\n\t\tcomputedMidLength = 0\n\t\tl50 = 0\n\t\tn50 = 0\n\t\tfor i,x in enumerate(seqLengths):\n\t\t\tif (midLength < computedMidLength):\n\t\t\t\tn50 = i\n\t\t\t\tl50 = x \n\t\t\t\tbreak\n\t\t\tcomputedMidLength += x\n\n\t\tscoresDict = {'number_of_contigs':len(seqLengths), 'smallestContig':min_length, 'meanContig':mean_length, \n\t\t'n50':n50, 'l50':l50, 'largestContig':max_length, 'lengthOfAssembly':sum(seqLengths)}\n\t\treturn scoresDict", "def personal_best(scores: list) -> int:\n return max(scores)", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def policy_value_fn(board):\n # return uniform probabilities and 0 score for pure MCTS", "def _score_to_decision(self, score):", "def compute_scores(self):\n if self.num_classes == 2:\n score_1 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold=0.5,\n )[1]\n\n score_2 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold_ratio=0.5,\n )[1]\n\n score_3 = self.competition_metric(\n impact_threshold=0.5,\n )[1]\n else:\n score_1 = self.detection_metric(threshold=0.1)\n score_2 = self.detection_metric(threshold=0.25)\n score_3 = self.detection_metric(threshold=0.5)\n\n return score_1, score_2, score_3", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore" ]
[ "0.6018452", "0.589307", "0.58814985", "0.58546895", "0.5849375", "0.5819225", "0.57758874", "0.5774465", "0.573999", "0.571513", "0.56816", "0.56671625", "0.56617856", "0.5648553", "0.5645618", "0.56252235", "0.55905104", "0.5585876", "0.5570236", "0.55351394", "0.550514", "0.5499441", "0.549548", "0.54920506", "0.54892886", "0.54666096", "0.5452374", "0.5436716", "0.5432589", "0.5422339", "0.541007", "0.5401808", "0.540114", "0.5387281", "0.5387181", "0.53680956", "0.5365741", "0.53621924", "0.5361431", "0.5360491", "0.53591585", "0.5355615", "0.5353186", "0.53528535", "0.5351295", "0.53438586", "0.5340739", "0.5338043", "0.5338043", "0.5329009", "0.5319276", "0.5319248", "0.53131557", "0.530622", "0.5305648", "0.5297552", "0.52974534", "0.52971166", "0.52918166", "0.5285302", "0.5281437", "0.527865", "0.527538", "0.52720416", "0.5262802", "0.5251447", "0.5251385", "0.5250647", "0.5248151", "0.5242032", "0.52407587", "0.52392185", "0.52392185", "0.52370137", "0.52359366", "0.5234142", "0.52334756", "0.52330536", "0.52311105", "0.5230794", "0.52256876", "0.52256876", "0.5224814", "0.52212626", "0.52173704", "0.5214557", "0.5212175", "0.5211498", "0.52033854", "0.5201892", "0.520027", "0.5199206", "0.519827", "0.51965165", "0.5190764", "0.51895756", "0.51809675", "0.5179754", "0.5177394", "0.51748997" ]
0.5997721
1
Function to plot the biosafe output; it is not called as such in the Virtual River, where biodiversity_graph() is called instead.
def plot(self):
    # plot the data for checking
    fig, [[ax1,ax2],[ax3,ax4], [ax5,ax6]] = plt.subplots(
        3,2, figsize=(10,8))
    # Relative height
    self.board_reference.plot(
        column='z_reference', cmap='GnBu_r', legend=True, ax=ax1)
    self.board_intervention.plot(
        column='z_reference', cmap='GnBu_r', legend=True, ax=ax2)
    # Landuse
    self.board_reference.plot(
        column='landuse', legend=True, ax=ax3, cmap='viridis',
        scheme='equal_interval', k=11)
    self.board_intervention.plot(
        column='landuse', legend=True, ax=ax4, cmap='viridis',
        scheme='equal_interval', k=11)
    index = np.arange(7)
    xticks = self.PotTax_reference.index.values
    bar_width = 0.3
    # plot the initial and new situation comparison
    label = ("reference: " +
             str(round(self.PotTax_reference.sum().TFI, 2)))
    reference = ax5.bar(
        index, self.PotTax_reference.values.flatten(),
        bar_width, label=label, tick_label=xticks)
    label = ("intervention: " +
             str(round(self.PotTax_intervention.sum().TFI, 2)))
    intervention = ax5.bar(
        index+bar_width, self.PotTax_intervention.values.flatten(),
        bar_width, label=label, tick_label=xticks)
    ax5.set_ylabel("total value")
    ax5.legend(loc='best')
    for tick in ax5.get_xticklabels():
        tick.set_rotation(90)
    # plot the percentage increase/decrease between the initial and new
    # situation
    data = self.PotTax_percentage.values.flatten()
    percentage = ax6.bar(
        index, data, bar_width, label="percentage",
        tick_label=xticks)
    ax6.set_ylabel("increase (%)")
    minimum = min(data)
    maximum = max(data)
    size = len(str(int(round(maximum))))
    maximum = int(str(maximum)[:1])
    maximum = (maximum + 1) * (10**(size-1))
    ax6.set_ylim([min(0, minimum), maximum])
    for tick in ax6.get_xticklabels():
        tick.set_rotation(90)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_graph(self) -> None:", "def biodiversity_graph(self, graph=\"percentage\"):\n plt.ioff()\n fig, ax = plt.subplots()\n index = np.arange(7)\n bar_width = 0.3\n offset = bar_width / 2\n if graph == \"score\":\n if self.PotTax_reference is not None:\n # this requires a deepcopy, otherwise the xticks updates also\n # updates the PotTax_percentage indexes.\n xticks = deepcopy(self.PotTax_reference.index.values)\n for i, item in enumerate(xticks):\n if item == \"DragonDamselflies\":\n xticks[i] = \"Dragon &\\nDamselflies\"\n if item == \"HigherPlants\":\n xticks[i] = \"Higher\\nPlants\"\n #label = (\"reference: \" +\n # str(round(self.PotTax_reference.sum().TFI, 2)))\n label = \"initial board\"\n reference = ax.bar(\n index-offset, self.PotTax_reference.values.flatten(),\n bar_width, label=label, tick_label=xticks)\n if self.PotTax_intervention is not None:\n #label = (\"intervention: \" +\n # str(round(self.PotTax_intervention.sum().TFI, 2)))\n label = \"current board\"\n intervention = ax.bar(\n index+offset, self.PotTax_intervention.values.flatten(),\n bar_width, label=label, tick_label=xticks)\n ax.set_title(\"Biodiversity scores\")\n ax.set_ylabel(\"total value\")\n legend = ax.legend(loc='best', facecolor='black', edgecolor='w',\n fancybox=True, framealpha=0.5, fontsize=\"large\")\n plt.setp(legend.get_texts(), color='w')\n else:\n if self.PotTax_percentage is not None:\n # this requires a deepcopy, otherwise the xticks updates also\n # updates the PotTax_percentage indexes.\n xticks = deepcopy(self.PotTax_percentage.index.values)\n for i, item in enumerate(xticks):\n if item == \"DragonDamselflies\":\n xticks[i] = \"Dragon &\\nDamselflies\"\n if item == \"HigherPlants\":\n xticks[i] = \"Higher\\nPlants\"\n data = self.PotTax_percentage.values.flatten()\n percentage = ax.bar(\n index, data, bar_width, label=\"percentage\",\n tick_label=xticks)\n ax.set_title(\"Biodiversity change\")\n ax.set_ylabel(\"change (%)\")\n # the xticks rotation could probably be handled better.\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n tick.set_fontsize(14)\n # set the color of all figure borders, axis ticks and text to white.\n ax.spines['bottom'].set_color('w')\n ax.spines['top'].set_color('w') \n ax.spines['right'].set_color('w')\n ax.spines['left'].set_color('w')\n ax.tick_params(axis='x', colors='w')\n ax.tick_params(axis='y', colors='w')\n ax.yaxis.label.set_color('w')\n ax.yaxis.label.set_fontsize(14)\n ax.xaxis.label.set_color('w')\n ax.xaxis.label.set_fontsize(14)\n ax.title.set_fontsize(20)\n ax.title.set_color('w')\n plt.tight_layout()\n if graph == \"score\":\n plt.savefig(os.path.join(self.web_dir, \"biodiversity_score1.png\"),\n edgecolor='w',transparent=True)\n else:\n plt.savefig(os.path.join(self.web_dir, \"biodiversity_score2.png\"),\n edgecolor='w',transparent=True)\n plt.close(fig)\n return", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def run_and_plot(self):\n self.raw_processing()\n self.data_averaging_and_cleaning()\n\n print(self.organized_names)\n print(self.organized_film)\n print(self.organized_plank)\n\n height = self.organized_film\n bars = tuple(self.organized_names.copy())\n y_pos = np.arange(len(bars))\n\n plt.bar(y_pos, height)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('CFU/mL count')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()\n\n height2 = self.organized_plank\n\n plt.bar(y_pos, height2)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul 
water/TH mixture')\n plt.ylabel('Proportion of Biofilm CFUs to Planktonic CFUs')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()", "def _plot(self):\r\n fig = plt.figure()\r\n\r\n # Take out second component of intensity if needed\r\n # if self._vna.isTwoComponents():\r\n # intensitySimplified = []\r\n # for i in range(len(self._intensity)):\r\n # tempSet = []\r\n # for j in range(len(self._intensity[i])):\r\n # if (j%2) == 0:\r\n # tempSet.append(self._intensity[i][j])\r\n # intensitySimplified.append(tempSet)\r\n # for i in range(len(self._frequency)):\r\n # plt.plot(self._frequency[i],intensitySimplified[i],label=('%sv' % self._voltages[i][0]))\r\n # else:\r\n for i in range(len(self._frequency)):\r\n plt.plot(self._frequency[i],self._intensity[i],label=('%sv' % self._voltages[i][0]))\r\n plt.legend(loc='upper left')\r\n fig.suptitle('Intensity-Frequency with non-Constant Voltage', fontsize=18)\r\n plt.xlabel('Frequency (Hz)', fontsize=18)\r\n plt.ylabel('Intensity (dBm)', fontsize=16)\r\n\r\n # Save plot\r\n self._saveFig()", "def plot(self):\n pass", "def _plot_graph(self) -> None:\n ghg_data, bird_data = self._datasets\n model = self._selection.get_model(ghg_data, bird_data)\n model.plot_data('Percent Change in Bird population (from 1970) vs '\n 'Amount of Greenhouse gas produced in a year',\n 'Amount of Greenhouse gas produced in a year (kt)',\n 'Percent Change in Bird population (from 1970)')", "def show():\n\tplt.show()", "def plot(self):\n\t\tself.plotOfIP().plot()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plot():\n pass", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def show():\n setup()\n plt.show()", "def plot(self):\n fx = self.fitness_functions(self.archive)\n n = len(fx[0])\n\n if n == 2:\n plt.xlabel(\"F1\")\n plt.ylabel(\"F2\")\n plt.suptitle(\"Pareto Front\")\n plt.scatter(fx[:,0], fx[:,1], label='Archive')\n plt.show()\n elif n == 3:\n plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter(fx[:, 0], fx[:, 1], fx[:, 2])\n ax.set_xlabel(\"F1\")\n ax.set_ylabel(\"F2\")\n ax.set_zlabel(\"F3\")\n plt.suptitle(\"Pareto Front of Archive\")\n plt.show()\n else:\n print(\"Cannot Print Multi-Dimensional Front greater than 3D\")", "def visualizations():\r\n raise 
NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def setup_plot(self):\n\n # Get all the healthy, immune, infected, and dead people seperately \n healthy_x = self.putil.population.get_all_healthy()[:, index.x_axis]\n healthy_y = self.putil.population.get_all_healthy()[:, index.y_axis]\n infected_x = self.putil.population.get_all_infected()[:, index.x_axis]\n infected_y = self.putil.population.get_all_infected()[:, index.y_axis]\n immune_x = self.putil.population.get_all_recovered()[:, index.x_axis]\n immune_y = self.putil.population.get_all_recovered()[:, index.y_axis]\n dead_x = self.putil.population.get_all_dead()[:, index.x_axis]\n dead_y = self.putil.population.get_all_dead()[:, index.y_axis]\n total_infected = self.putil.size - len(healthy_x)\n total_hospitalized = len(self.putil.persons[self.putil.persons[:,index.hospitalized] == 3])\n \n # Current healthcare status\n self.healthcare_status = \"Normal\"\n \n # Scatter plots to plot people\n self.scat = self.ax.scatter(healthy_x,\n healthy_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"lightsteelblue\", s=10)\n self.scat2 = self.ax.scatter(infected_x,\n infected_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indianred\", s=10)\n self.scat3 = self.ax.scatter(immune_x,\n immune_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"mediumseagreen\", s=10)\n self.scat4 = self.ax.scatter(dead_x,\n dead_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indigo\", s=10)\n # Lists for line graph\n self.infected = []\n self.infected_total = []\n self.deaths = []\n self.frames = []\n self.immunes = []\n self.infected.append(len(infected_x))\n self.deaths.append(len(dead_x))\n self.infected_total.append(self.putil.size - len(healthy_x))\n self.immunes.append(len(immune_x))\n self.frames.append(0)\n\n # Line graph plotting number\n self.total_infected, = self.ax1.plot(self.frames, self.infected_total)\n self.currently_infected, = self.ax1.plot(self.frames, self.infected, c=\"indianred\", label='Currently Infected')\n self.total_deaths, = self.ax1.plot(self.frames, self.deaths, c=\"indigo\", label='Total Dead')\n self.total_immune, = self.ax1.plot(self.frames, self.immunes, c=\"mediumseagreen\", label='Total Immune')\n\n # Code below prints statistics \n if(self.putil.enforce_social_distance_at > 0):\n self.ax1.plot([self.putil.enforce_social_distance_at]*2, [0,self.putil.size],c=\"gold\", label=\"Social Distancing\")\n self.social_distancing_info = (\"At frame \" + str(self.putil.enforce_social_distance_at))\n self.social_distancing_num = str(int(self.putil.social_distance_per * self.putil.size)) + \" or \" + str(self.putil.social_distance_per*100)+\"%\"\n else:\n self.social_distancing_info = (\"Disabled\")\n self.social_distancing_num = \"0 or 0%\"\n\n if(self.putil.enforce_mask_wearing_at > 0):\n self.ax1.plot([self.putil.enforce_mask_wearing_at]*2, [0,self.putil.size],c=\"hotpink\", label=\"Mask Mandate\")\n self.mask_wearing_info = \"At frame \" + str(self.putil.enforce_mask_wearing_at) \n else:\n self.mask_wearing_info = \"Disabled\"\n\n self.ax1.tick_params(axis=\"y\",direction=\"in\", pad=3)\n self.ax1.plot([0,1000],[self.putil.virus.total_healthcare_capacity]*2, c=\"silver\")\n self.ax1.get_xaxis().set_visible(False)\n self.ax1.legend(prop={'size': 8},loc='upper right')\n self.ax2.text(0,1,\"Statistics\", fontsize='large' , fontweight='bold')\n self.ax2.text(0,-0.5, \"Frame:\\nCurrently Infected:\\nHealthy People:\\nImmune People:\\nTotal Deaths:\\nHealthcare Conditions:\")\n 
self.ax2.text(0.54,-0.5, \"Population:\\nMasks Wearing:\\nSocial Distancing:\\nPeople Distancing:\\nTotal Infected:\\n\")\n self.ax.text(0,1.06, \"Simulation\", fontsize='xx-large' , fontweight='bold')\n self.text = self.ax2.text(0.33, -0.5, \"%i \\n%i \\n%s \\n%s \\n%s \\n%s\" %(0,len(infected_x),str(len(healthy_x)) + \" or 0%\", str(len(immune_x)) + \" or 0%\",str(len(dead_x)) + \" or 0%\",self.healthcare_status))\n self.text2 = self.ax2.text(0.81,-0.5,\"%d \\n%s \\n%s \\n%s \\n%s\\n\" % (self.putil.size, self.mask_wearing_info, self.social_distancing_info, self.social_distancing_num , total_infected))\n\n return self.scat, self.scat2, self.scat3, self.scat4, self.currently_infected, self.total_infected,", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def main():\n df = prof_conv_bwd_filter()\n df.to_csv(\"prof.cudnnConvBwdFilter.csv\")\n\n \"\"\"visualization, Roofline model\"\"\"\n df = pd.read_csv('prof.cudnnConvBwdFilter.csv', header=0, index_col=0)\n fig = plt.figure(figsize=(10,5))\n ax = fig.add_subplot(1, 1, 1)\n plot_rooline (ax, MACHINE_SPEC, PEAK_PERF, BAND_WIDTH)\n plot_result (ax, df)\n # fig.subplots_adjust(right=0.8)\n plt.subplots_adjust(left=0.1, right=0.6)\n plt.savefig('roofline.png')\n return", "def do_plot(self):\n years = sorted(set(self.prediction_df_without_covid19['Year']))\n predict_without_covid_country = self.prediction_df_without_covid19[\n self.prediction_df_without_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n predict_with_covid_country = self.prediction_df_with_covid19[\n self.prediction_df_with_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n # ------------------------------------------------------------------------------------------------------\n pa = \\\n predict_without_covid_country.loc[predict_without_covid_country['Year'] == 1990][\n 'Total_CO2_Emissions'].values[\n 0]\n x = []\n for i in range(len(years)):\n x.append(pa * 0.6)\n # ------------------------------------------------------------------------------------------------------\n fig = Figure()\n ax = fig.subplots()\n ax.grid(True, alpha=0.3)\n # plot_title = 'Total CO2 Emissions predicted from 2019-2030 for ' + self.country\n plot_title = 'Total ' + '$CO_2$' + ' Emissions predicted from 2019-2030 for ' + self.country\n label_country_without_covid = 'Total CO2 emissions without covid'\n label_country_with_covid = 'Total CO2 emissions with Covid-19'\n # ------------------------------------------------------------------------------------------------------\n params = {'mathtext.default': 'regular'}\n rcParams.update(params)\n rcParams['font.size'] = 7\n rcParams['lines.markersize'] = 4\n rcParams['figure.figsize'] = [7, 4]\n rcParams['figure.dpi'] = 150\n rcParams['font.family'] = 'Verdana'\n rcParams[\"font.weight\"] = \"normal\"\n font = {'family': 'Verdana',\n 'color': 'xkcd:darkgreen',\n 'weight': 'normal',\n 'size': 9,\n }\n colors = rcParams['axes.prop_cycle'].by_key()['color']\n l1, = ax.plot(years, predict_without_covid_country['Total_CO2_Emissions'], color='xkcd:dark blue green',\n marker='o',\n label=label_country_without_covid)\n l2, = ax.plot(years, 
predict_with_covid_country['Total_CO2_Emissions'], color='xkcd:neon pink', marker='.',\n label=label_country_with_covid)\n l3, = ax.plot(years, x, color='xkcd:orchid', marker='1')\n print('without covid: ', predict_without_covid_country['Total_CO2_Emissions'].values)\n print('with covid: ', predict_with_covid_country['Total_CO2_Emissions'].values)\n ax.set_xlabel('Years', fontdict=font)\n ax.set_ylabel('Emissions (Gg)', fontdict=font)\n ax.set_title(plot_title, fontsize=12, fontweight='normal')\n ax.patch.set_facecolor('xkcd:green')\n ax.set_facecolor('xkcd:pale green')\n fig.legend((l1, l2, l3), ('Prediction without Covid19', 'Prediction with Covid19', 'Paris Agreement'),\n bbox_to_anchor=(0.907, 0.89))\n fig.savefig(OUTPUT_GRAPH_PATH)", "def plot_all(self) -> None:\n self.__plot_si_cf_plane()\n self.__plot_convex_hull()\n self.__plot_fixed_radius()\n self.__plot_delaunay()", "def plot(self):\n\t\tself.plotOfXray().plot()", "def doAllPlots ():\n #df = processIp (\"18-06-01-1-attack.pcap\", \"ec:1a:59:79:f4:89\")\n #df.to_csv (\"df.csv\", index=False)\n df = pd.read_csv (\"df.csv\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropyWithThreshold (df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n \"\"\"\n Traffic flow graph\n \"\"\"\n #df = processTrafficFlow (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotTrafficFlow (df)\n\n \"\"\"\n Entropy for source port\n \"\"\"\n #df = processSrcPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df)\n\n \"\"\"\n Entropy for destination port\n \"\"\" \n #df = processDstPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df) \n\n \"\"\"\n It will be implemented next day\n df = processPorts (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n df = processProtocols (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n \"\"\"\n return", "def plot_barcodes(output_path,M_max,max_dim,normalized=False,output_name=None):\n plots_folder = os.path.join(output_path,'plots')\n if output_name is None: \n aux = ['barcodes']\n input_file = os.path.join(output_path,'outputs_PDS.csv')\n else:\n aux = [output_name,'barcodes']\n input_file = os.path.join(output_path,'%s_PDS.csv' % output_name)\n if normalized is True:\n aux.append('normalized')\n plot_file_name = '-'.join(aux)+'.png'\n plot_file_name = os.path.join(plots_folder,plot_file_name)\n\n data = pd.read_csv(input_file,index_col = 0) \n\n fig_size_aux = 15/(3-max_dim)\n plt.figure(figsize=(fig_size_aux,5))\n for j in range(1,max_dim+2):\n plt.subplot(1,max_dim+1,j)\n L = len(data[data.dimH==j-1].death.values)\n factor=np.sqrt(L);\n for i,pair in enumerate(zip(data[data.dimH==j-1].birth.values,data[data.dimH==j-1].death.values)):\n if(normalized):\n plt.plot([float(pair[0])/float(M_max),float(pair[1])/float(M_max)],[factor*(L-i), factor*(L-i)],'o-');\n else:\n plt.plot([float(pair[0]),float(pair[1])],[factor*(L-i), factor*(L-i)],'o-');\n plt.yticks([])\n if(normalized):\n plt.xlim((0,1))\n plt.xlabel('Normalized persistences')\n plt.legend([],loc=1,title='dim %i'%(j-1))\n plt.suptitle('Barcodes')\n plt.savefig(plot_file_name)\n print 'Saved barcodes plot in %s'%plot_file_name\n 
else:\n plt.xlim((0,M_max))\n plt.xlabel('persistence')\n plt.legend([],loc=1,title='dim %i'%(j-1))\n plt.suptitle('Barcodes')\n plt.savefig(plot_file_name)\n print 'Saved barcodes plot in %s'%plot_file_name\n return()", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def show(self):\n plt.show()", "def plot(self):\n t = np.linspace(0, self.days, self.days + 1)\n fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(nrows=5, sharex='all')\n ax1.plot(t, self.S, label=\"Susceptible\", color='r')\n ax1.set_ylabel(\"Number of Susceptible People\")\n ax1.set_title(\"Strong Infecitous Model SEIRV Simulation\")\n ax3.plot(t, self.I, label=\"Active Cases\", color='b')\n ax3.set_ylabel(\"Active Cases\")\n ax2.plot(t, self.E, label=\"Exposed\", color='c')\n ax2.set_ylabel(\"# of Exposed\")\n ax4.plot(t, self.R, label=\"Recovered\", color='m')\n ax5.set_xlabel(\"Days\")\n ax4.set_ylabel('Number of Recovered')\n ax5.plot(t, self.V, label=\"Vaccinated\")\n ax5.set_ylabel(\"# Vaccinated\")\n ax1.legend()\n ax2.legend()\n ax3.legend()\n ax4.legend()\n plt.show()\n return fig", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def main():\n\tplt.clf()\n\taxes = setup_axes()\n\tplot_output(axes, \"../../simulations/default\", \"black\")\n\tplot_output(axes, \"../../simulations/yccsr_zero\", \"crimson\")\n\tplot_output(axes, \"../../simulations/yccsr_linear\", \"lime\")\n\tplot_output(axes, \"../../simulations/yccsr_1-exp\", \"deepskyblue\")\n\tvisuals.plot_track_points_intervals(axes[0],\n\t\tvice.history(\"../../simulations/default\"), element = \"Sr\",\n\t\treference = \"Fe\")\n\tplot_legend(axes[1])\n\tplt.tight_layout()\n\tvisuals.yticklabel_formatter(axes[1])\n\tplt.savefig(sys.argv[1])\n\tplt.clf()", "def _plot_evidence_code(title=None):\n plt.ylabel(\"\")\n plt.title(title)\n plt.tight_layout()\n plt.show()", "def plot(self):\n layout = self.graph.layout(\"kk\")\n bbox = igraph.BoundingBox(600, 600)\n figure = igraph.Plot(bbox=bbox, background=\"white\")\n bbox = bbox.contract(100)\n figure.add(self.graph, layout = layout, bbox=bbox)\n figure.show()", "def plot(self, 
x_feature=\"ratio\", y_feature=\"fold_change\", ax=None):\n\n if ax is None:\n ax = plt.gca()\n\n # - Data\n x, y = (\n self.bed_seg.query(f\"sgRNA_ID >= {self.n_sgrna}\")[x_feature],\n self.bed_seg.query(f\"sgRNA_ID >= {self.n_sgrna}\")[y_feature],\n )\n x_, y_ = (\n self.bed_seg.query(f\"sgRNA_ID < {self.n_sgrna}\")[x_feature],\n self.bed_seg.query(f\"sgRNA_ID < {self.n_sgrna}\")[y_feature],\n )\n\n x_pred = np.arange(0, x.max(), 0.1)\n y_pred, y_pred_std = self.predict(x_pred.reshape(-1, 1), return_std=True)\n\n # - Plot\n # Segments used for fitting\n ax.scatter(\n x,\n y,\n c=cy.QCplot.PAL_DBGD[0],\n alpha=0.7,\n edgecolors=\"white\",\n lw=0.3,\n label=f\"#(sgRNA_ID) >= {self.n_sgrna}\",\n )\n\n # Segments not used for fitting\n plt.scatter(\n x_,\n y_,\n c=cy.QCplot.PAL_DBGD[0],\n marker=\"X\",\n alpha=0.3,\n edgecolors=\"white\",\n lw=0.3,\n label=f\"#(sgRNA_ID) < {self.n_sgrna}\",\n )\n\n # Plot GP fit\n # GP fit\n plt.plot(\n x_pred, y_pred, ls=\"-\", lw=1.0, c=cy.QCplot.PAL_DBGD[1], label=\"GPR mean\"\n )\n plt.fill_between(\n x_pred,\n y_pred - y_pred_std,\n y_pred + y_pred_std,\n alpha=0.2,\n color=cy.QCplot.PAL_DBGD[1],\n lw=0,\n )\n\n # Misc\n plt.axhline(0, ls=\":\", color=cy.QCplot.PAL_DBGD[2], lw=0.3, zorder=0)\n\n plt.xlabel(f\"Segment\\n{x_feature}\")\n plt.ylabel(f\"Segment\\nmean {y_feature}\")\n\n plt.title(f\"{self.kernel_}\", fontsize=6)\n\n plt.legend(frameon=False)\n\n return ax", "def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle = None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()", "def plot(self, x, y, b, path=None):\n label = [\"atypical\", \"indeterminate\", \"negative\", \"typical\"]\n _, pred = self.cam_model.predict(x)\n for i in range(len(x)):\n image = x[i] if x.shape[-1] == 3 else np.squeeze(x[i], -1)\n\n fig, axs = plt.subplots(2, 2)\n for j in range(4):\n ax_x = [0, 1, 0, 1]\n ax_y = [0, 0, 1, 1]\n ax = axs[ax_x[j], ax_y[j]]\n p = np.argmax(pred[i])\n a = np.argmax(y[i])\n c = '(pa)' if j == p and p == a else '(p)' if j == p else '(a)' if j == a else ''\n ax.title.set_text(f\"{label[j]} {c}\")\n # hide axis ticks\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n ax.tick_params(axis='both', which='both', length=0)\n # plot original image with boxes\n ax.imshow(image, cmap=\"gray\", aspect=\"equal\")\n for box in b[i]:\n ax.add_patch(Rectangle((box[\"x\"], box[\"y\"]), box[\"width\"], box[\"height\"], linewidth=1, edgecolor=\"r\", facecolor=\"None\", alpha=0.6))\n # plot CAM\n camap = self.generate(x[i], label=j, zoom=True)\n camap = ax.imshow(camap, cmap=\"coolwarm\", aspect=\"equal\", alpha=0.6)\n #cax = fig.add_axes([ax2.get_position().x1+0.01, ax2.get_position().y0,0.02, ax2.get_position().height])\n #plt.colorbar(camap, cax=cax, orientation=\"vertical\")\n if path != None: plt.savefig(path + f\"_{i}.png\", dpi=300, format=\"png\")\n plt.show()", "def plot(self, fname=None):\n x = np.linspace(self.bounds[0], self.bounds[-1], 200)\n y = [self.evaluate(xi) for xi in x]\n plt.figure()\n plt.plot(x, y, label='Class func')\n plt.plot(self.bounds, self.gis, 'o', label='Algorithm')\n plt.grid(color='0.7')\n plt.xlabel('Dependent Variable')\n plt.ylabel('PP Transformed Class Value')\n if fname:\n plt.savefig(fname)\n else:\n plt.show()", 
"def plot(self):\n x = np.arange(5)\n # labels = ['temp', 'humi', 'mais', 'o2', 'co2']\n plt.bar(x - 0.35/2, self.data, 0.35, label='actual')\n plt.bar(x + 0.35/2, self.desired_values, 0.35, label='desired')\n plt.ylim(-5, 80)\n plt.legend()\n\n plt.draw()\n plt.pause(0.000001)\n plt.clf()", "def plot_basins(f, Df, zeros, domain, res=1000, iters=15):\n raise NotImplementedError(\"Problem 7 Incomplete\")", "def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)", "def plot_graph(self):\r\n A = self.a_grid ; V = self.V1 ; Pol = self.Pol\r\n A_opt = A[Pol.astype(int)]\r\n \r\n fig = plt.subplots(figsize = (8,5))\r\n ax = [None,None]\r\n pltgrid = (1,2)\r\n \r\n ax[0] = plt.subplot2grid(pltgrid, (0,0))\r\n ax[1] = plt.subplot2grid(pltgrid, (0,1))\r\n \r\n ax[0].plot(A[:],V[:,0,0], linewidth = 2, color = 'blue', label = r'$V(a)$: Low $w$')\r\n ax[0].plot(A[:],V[:,0,5], linewidth = 2, color = 'green', label = r'$V(a)$: Median $w$')\r\n ax[0].plot(A[:],V[:,0,-1], linewidth = 2, color = 'red', label = r'$V(a)$: High $w$')\r\n \r\n ax[1].plot(A[:],A_opt[:,0,0], linewidth = 2, color = 'blue', label = r'$a\\'(a)$: Low $w$')\r\n ax[1].plot(A[:],A_opt[:,0,5], linewidth = 2, color = 'green', label = r'$a\\'(a)$: Median $w$')\r\n ax[1].plot(A[:],A_opt[:,0,-1], linewidth = 2, color = 'red', label = r'$a\\'(a)$: High $w$')\r\n ax[1].plot(A[:],A[:], linewidth = 2, color = 'violet', linestyle = 'dashed', zorder = 1)\r\n \r\n \r\n ax[0].set_xlabel(r'$a$') ; ax[0].legend()\r\n ax[1].set_xlabel(r'$a$') ; ax[1].legend()\r\n ax[0].set_title('Value function')\r\n ax[1].set_title('Asset policy')\r\n \r\n plt.tight_layout()\r\n plt.show()", "def main(args=None):\n opt = _parse_options(args)\n plot_analysis(opt)\n plt.show()", "def plot_forest(self):\n ax, = az.plot_forest(self.ifd_, var_names=[\"avg\", \"a_coef\", \"b_vals_coef\", \"b_mask_coef\", \"c_vals_coef\", \"c_mask_coef\"])\n ax.axvline(0, linestyle=':', color='black')\n # return ax", "def show_dbscan():\n\n # simulate normal hourly data\n weekday = ([0.05, 0.95], 0.05) #bath, bed\n weekend = ([0.3, 0.7], 0.1)\n roomperwd, 
truelabelswd = make_blobs(n_samples=23, centers=weekday[0],\n cluster_std=weekday[1], random_state=0)\n roomperwe, truelabelswe = make_blobs(n_samples=8, centers=weekend[0],\n cluster_std=weekend[1], random_state=0)\n\n # combine modes\n roompers = np.vstack((roomperwd, roomperwe))\n\n # make positive and sum to one to simulate valid distribution\n for i in range(roompers.shape[0]):\n for j in range(roompers.shape[1]):\n if roompers[i, j] < 0:\n roompers[i, j] = 0\n roompersnorm = normalize(roompers, norm='l1')\n\n # simulate anomaly on most recent day where don't leave bedroom\n roompersnorm[-1, :] = np.array([0.8, 0.2])\n\n # detect outliers\n roompersdetector = HourlyRoomPercentageAnomalyDetection(roompersnorm, eps=0.3, min_samples=3)\n labels = roompersdetector.scale_and_proximity_cluster(eps=0.3, min_samples=3)\n\n # plot results\n plt.figure()\n seenflag1 = False; seenflag2 = False; seenflag3 = False;\n for i, label in enumerate(labels):\n if label == 0:\n if seenflag1:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'ro')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'ro', label='Cluster 1')\n seenflag1 = True\n elif label == 1:\n if seenflag2:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'kx')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'kx', label='Cluster 2')\n seenflag2 = True\n elif label == -1:\n if seenflag3:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'b^')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'b^', label='Outlier')\n seenflag3 = True\n plt.legend(loc='lower left')\n plt.axis([0, 1, 0, 1])\n plt.show()", "def plot_biomass_by_region(df):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n bmsmt = df['BmsMT'].groupby([df.Year, df.Reg, df.Sreg]).mean() \n\n bmsmt.loc[:, 'All', 'All'].plot(ax=ax, label='All') \n\n subregs = [str(x) for x in range(1,12)]\n label = 'Mid Atlantic'\n bmsmt.loc[:, '1', subregs].unstack(level=0).sum().plot(ax=ax, label=label)\n\n subregs = [str(x) for x in range(1,12)] \n label = 'Georges Bank'\n bmsmt.loc[:, '2', subregs].unstack(level=0).sum().plot(ax=ax, label=label)\n\n ax.legend(loc='best')\n ax.set_ylim(bottom=-10000)\n ax.set_position(default_timeseries_position)\n ax.set_ylabel('Biomass (mt meats)')\n\n content = io.BytesIO()\n plt.savefig(content, format='png')\n content.seek(0)\n image_cache['biomass']['by_region'] = content\n\n plt.close()", "def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n 
plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")", "def plot(self) -> None:\n if self.__fig is None:\n self.__fig = plt.figure()\n\n xv = []\n yv = []\n for x in np.arange(self.state_min(), self.state_max(), self.state_step()):\n xv.append(x)\n yv.append(self.reward(x))\n ax = self.__fig.gca()\n ax.set_xlabel('X (State)')\n ax.set_ylabel('Y (Reward)')\n ax.set_title('Reward 
Function')\n ax.plot(xv, yv)\n plt.pause(self.__plot_pause)\n plt.show(block=False)\n return", "def plot(self, area=False):\n for b in self.buildings:\n b.plot()", "def plot(self):\n cs = plt.contour(self.X, self.Y, self.fitness_function)\n plt.clabel(cs, inline=1, fontsize=6)\n plt.imshow(self.fitness_function, extent=self.limits, origin=\"lower\", alpha=0.3)", "def visualize(self, paths, instance, during_analysis):\r\n xvalues = np.arange(self.data.shape[0])\r\n\r\n model_data = self.model_data_from_instance(instance=instance)\r\n\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n\r\n \"\"\"The visualizer now outputs images of the best-fit results to hard-disk (checkout `visualizer.py`).\"\"\"\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=self.data,\r\n title=\"Data\",\r\n ylabel=\"Data Values\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"data\",\r\n )\r\n\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=model_data,\r\n title=\"Model Data\",\r\n ylabel=\"Model Data Values\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"model_data\",\r\n )\r\n\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=residual_map,\r\n title=\"Residual Map\",\r\n ylabel=\"Residuals\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"residual_map\",\r\n )\r\n\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=chi_squared_map,\r\n title=\"Chi-Squared Map\",\r\n ylabel=\"Chi-Squareds\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"chi_squared_map\",\r\n )", "def plot(self):\n\t\tself.plotOfSpect()", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def show_graphs ():\n plt.ylim = (0, 300)\n plt.xlim = (0, 300)\n #Set up lidar plot to figure 1\n lidar_plot = plt.figure (1)\n #Assign title\n plt.title ('Lidar data')\n #Assign data\n plt.imshow (lidar_clean)\n #Set up radar plot to figure 2\n radar_plot = plt.figure (2)\n #Assign title\n plt.title ('Radar data')\n #Assign data\n plt.imshow (radar_clean)\n #Show plots\n plt.show ()", "def plot_eos(eos_pk):\n import pylab as pl\n from aiida.orm import load_node\n eos_calc=load_node(eos_pk)\n eos_result=eos_calc.out.result\n raw_data = eos_result.dict.eos_data\n \n data = []\n for V, E, units in raw_data:\n data.append((V,E))\n \n data = np.array(data)\n params, covariance = fit_birch_murnaghan_params(data[:,0],data[:,1])\n \n vmin = data[:,0].min()\n vmax = data[:,0].max()\n vrange = np.linspace(vmin, vmax, 300)\n\n pl.plot(data[:,0],data[:,1],'o')\n pl.plot(vrange, birch_murnaghan(vrange, *params))\n\n pl.xlabel(\"Volume (ang^3)\")\n # I take the last value in the list of units assuming units do not change\n pl.ylabel(\"Energy ({})\".format(units))\n pl.show()", "def dendogram(self):\r\n \r\n plt.figure(figsize=(20, 7))\r\n dendrogram = sch.dendrogram(sch.linkage(self.X, method='ward'))\r\n plt.title(\"Dendograms\")\r\n plt.axhline(linestyle='--', y=5) \r\n plt.show()", "def data_visualization(df):\r\n\r\n # Visualizing the target variable\r\n plt.figure(figsize=(14, 10))\r\n plt.title(\"Count of bike sharing according to dates\")\r\n plt.plot(df['dteday'], df['cnt'])\r\n #plt.show()\r\n plt.savefig(\"Raw data 
visualization.png\")\r\n\r\n # box plot for visualizing outliers\r\n fig=px.box(df, y=\"cnt\", notched=True,title='Box plot of the count variable')\r\n #fig.show()\r\n plt.savefig(\"Box Plot.png\")\r\n\r\n # point plot for hourly utilization\r\n for column in ['season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday', 'weathersit']:\r\n hist = px.histogram(df, x=column, y='cnt')\r\n hist.show()\r\n plt.savefig(\"Histogram plots for each column.png\")\r\n sns.pointplot(x=df['hr'], y='cnt', data=df);\r\n plt.title(\"Hourly Utilization\")\r\n plt.ylabel(\"Bike Shares\", fontsize=12)\r\n plt.xlabel(\"Hour\", fontsize=12)\r\n plt.savefig(\"Hourly Utilization point plot.png\", dpi=300, bbox_inches='tight')\r\n\r\n # line plot for hourly utilization\r\n for c in ['holiday','season','workingday']:\r\n sns.lineplot(data=df,x='hr',y='cnt',hue=c)\r\n plt.title('Hourly plot vs count')\r\n plt.savefig(\"Hour vs count plot_main features.png\",dpi=300, bbox_inches='tight')\r\n\r\n # point plots for humidity vs count\r\n sns.pointplot(x='hum', y='cnt', data=df)\r\n plt.title(\"Amount of bike shares vs humidity\", fontsize=25)\r\n plt.xlabel(\"Humidity (%)\", fontsize=20)\r\n plt.ylabel('count of bike shares', fontsize=20)\r\n plt.locator_params(axis='x', nbins=10)\r\n plt.savefig(\"Pointplot of humidity vs count.png\",dpi=300, bbox_inches='tight')\r\n\r\n # box plots of whole df\r\n bx=px.box(df, y=\"cnt\")\r\n bx.show()\r\n\r\n # feature correlation plot\r\n corrs = abs(df.corr())\r\n sns.heatmap(corrs, annot=True)\r\n plt.title(\"Feature Correlation\")\r\n plt.savefig(\"Feature_correlation.png\", dpi=300, bbox_inches='tight')\r\n return plt", "def create_visual_graph(self):\n if self.predict_new and self.prediction_without_covid_case:\n self.predict_co2_emission_future()\n self.save_prediction_df()\n else:\n self.restore_prediction_df()\n if not self.analysis_plot:\n self.predict_co2_emission_future()\n self.save_prediction_df()\n\n self.do_plot()\n self.output_graph_file = OUTPUT_GRAPH_PATH\n return self.output_graph_file", "def plot(self,displayplt = True,saveplt = False,savepath='',polarplt=True, dbdown = False):\n plt.figure()\n\n #legacy beamprofile data is a 1-D array of the peak negative pressure\n if len(self.hydoutput.shape)<2:\n pnp = self.hydoutput\n else:\n sensitivity = hyd_calibration(self.cfreq)\n pnp = -1*np.min(self.hydoutput,1)/sensitivity\n\n if dbdown:\n pnp = 20.0*np.log10(pnp/np.max(pnp))\n else:\n pnp = pnp*1e-6\n\n figure1 = plt.plot(self.depth, pnp)\n #the latest beamprofile data should be a 2-D array of the hydrophone output\n plt.xlabel('Depth (mm)')\n if dbdown:\n plt.ylabel('Peak Negative Pressure (dB Max)')\n else:\n plt.ylabel('Peak Negative Pressure (MPa)')\n plt.title(self.txdr)\n if displayplt:\n plt.show()\n if saveplt:\n if savepath=='':\n #prompt for a save path using a default filename\n defaultfn = self.txdr+'_'+self.collectiondate+'_'+self.collectiontime+'_depthprofile.png'\n savepath = tkinter.filedialog.asksaveasfilename(initialfile=defaultfn, defaultextension='.png')\n plt.savefig(savepath)\n return figure1, savepath", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def main():\n save = False\n show = True\n\n #hd_parameter_plots = HDparameterPlots(save=save)\n #hd_parameter_plots.flow_parameter_distribution_for_non_lake_cells_for_current_HD_model()\n #hd_parameter_plots.flow_parameter_distribution_current_HD_model_for_current_HD_model_reprocessed_without_lakes_and_wetlands()\n 
#hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_no_tuning()\n #ice5g_comparison_plots = Ice5GComparisonPlots(save=save)\n #ice5g_comparison_plots.plotLine()\n #ice5g_comparison_plots.plotFilled()\n #ice5g_comparison_plots.plotCombined()\n #ice5g_comparison_plots.plotCombinedIncludingOceanFloors()\n #flowmapplot = FlowMapPlots(save)\n #flowmapplot.FourFlowMapSectionsFromDeglaciation()\n #flowmapplot.Etopo1FlowMap()\n #flowmapplot.ICE5G_data_all_points_0k()\n #flowmapplot.ICE5G_data_all_points_0k_no_sink_filling()\n #flowmapplot.ICE5G_data_all_points_0k_alg4_two_color()\n #flowmapplot.ICE5G_data_all_points_21k_alg4_two_color()\n #flowmapplot.Etopo1FlowMap_two_color()\n #flowmapplot.Etopo1FlowMap_two_color_directly_upscaled_fields()\n #flowmapplot.Corrected_HD_Rdirs_FlowMap_two_color()\n #flowmapplot.ICE5G_data_ALG4_true_sinks_21k_And_ICE5G_data_ALG4_true_sinks_0k_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_sinkless_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_no_true_sinks_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_HD_as_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplot.Ten_Minute_Data_from_Virna_data_ALG4_corr_orog_downscaled_lsmask_no_sinks_21k_vs_0k_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n flowmapplotwithcatchment = FlowMapPlotsWithCatchments(save)\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_virna_data_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.compare_lgm_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.upscaled_rdirs_with_and_without_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #upscaled_rdirs_with_and_without_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n 
#Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_glcc_olson_lsmask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE5G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE6G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_ICE5G_and_ICE6G_with_catchments_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts()\n flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min()\n outflowplots = OutflowPlots(save)\n #outflowplots.Compare_Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_sinkless_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_true_sinks_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_sinkless_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_true_sinks_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_downscaled_ls_mask_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_plus_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k()\n outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks_individual_rivers()\n #outflowplots.Compare_ICE5G_with_and_without_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #hd_output_plots = HDOutputPlots()\n #hd_output_plots.check_water_balance_of_1978_for_constant_forcing_of_0_01()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data_adding_back_to_discharge()\n #coupledrunoutputplots = CoupledRunOutputPlots(save=save)\n #coupledrunoutputplots.ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.ocean_grid_extended_present_day_rdirs_vs_ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_echam()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_mpiom_pem()\n #lake_plots = LakePlots()\n #lake_plots.plotLakeDepths()\n #lake_plots.LakeAndRiverMap()\n #lake_plots.LakeAndRiverMaps()\n if show:\n plt.show()", "def show_graph(g):\r\n net.draw(g,with_labels= True,font_size=16)\r\n plt.show()", "def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision 
trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]", "def plot(self):\n fig, ax = plt.subplots()\n ax.set_title(\"Covid-19 Progression Simulation\")\n ax.set_xlabel(\"X Position\")\n ax.set_ylabel(\"Y Position\")\n\n x_values = np.array([])\n y_values = np.array([])\n color_values = np.array([])\n\n for p in self.persons:\n x_values = np.append(x_values, p.position[0])\n y_values = np.append(y_values, p.position[1])\n color_values = np.append(color_values, self.color(p.state))\n\n colors = [\"green\", \"red\", \"blue\", \"black\"]\n\n scatter = ax.scatter(x_values, y_values,\n c=color_values, vmin=0, vmax=100)\n\n ax.legend(handles=self.legend_elements, loc='upper right')\n\n self.anim = manim.FuncAnimation(\n fig, self.animate, interval=self.update_interval, fargs=(self, ax, scatter))\n\n plt.tight_layout()\n plt.show()", "def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to 
add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()", "def figures(self):\n if np.size(self.iceicehorizons_depth1)>0:\n fig, ax = mpl.subplots()\n if self.site1.archive == 'icecore':\n mpl.xlabel(self.site1.label+' ice age (yr b1950)')\n else:\n mpl.xlabel(self.site1.label+' age (yr b1950)')\n if self.site2.archive == 'icecore':\n mpl.ylabel(self.site2.label+' ice age (yr b1950)')\n else:\n mpl.ylabel(self.site2.label+' age (yr b1950)')\n if np.size(self.iceicehorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_age_init(self.iceicehorizons_depth1),\n self.site2.fct_age_init(self.iceicehorizons_depth2),\n color=pccfg.color_init, linestyle='', marker='o', markersize=2,\n label=\"Initial\")\n mpl.plot(self.site1.fct_age_model(self.iceicehorizons_depth1),\n self.site2.fct_age_model(self.iceicehorizons_depth2),\n color=pccfg.color_mod, linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_age(self.iceicehorizons_depth1),\n self.site2.fct_age(self.iceicehorizons_depth2), color=pccfg.color_opt,\n xerr=np.zeros(np.size(self.iceicehorizons_depth1)),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_age(self.iceicehorizons_depth1)-self.iceicehorizons_sigma/2\n ystart = self.site2.fct_age(self.iceicehorizons_depth2)+self.iceicehorizons_sigma/2\n for i in range(np.size(self.iceicehorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.iceicehorizons_sigma[i],\n -self.iceicehorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0)\n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement', zorder=0)\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n if self.site1.archive == 'icecore' and self.site2.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/ice_ice_synchro.pdf')\n elif self.site1.archive == 'icecore' or self.site2.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/ice_synchro.pdf')\n else:\n printed_page = PdfPages(pccfg.datadir+self.label+'/synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()\n\n if self.site1.archive == 'icecore' and self.site2.archive == 'icecore':\n if np.size(self.airairhorizons_depth1)>0:\n fig, ax = mpl.subplots()\n mpl.xlabel(self.site1.label+' air age (yr b1950)')\n mpl.ylabel(self.site2.label+' air age (yr b1950)')\n if np.size(self.airairhorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_airage_init(self.airairhorizons_depth1),\n self.site2.fct_airage_init(self.airairhorizons_depth2),\n color=pccfg.color_init,\n linestyle='',\n marker='o', markersize=2, label=\"Initial\")\n mpl.plot(self.site1.fct_airage_model(self.airairhorizons_depth1),\n self.site2.fct_airage_model(self.airairhorizons_depth2),\n color=pccfg.color_mod,\n linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_airage(self.airairhorizons_depth1),\n 
self.site2.fct_airage(self.airairhorizons_depth2),\n color=pccfg.color_opt,\n xerr=np.zeros_like(self.airairhorizons_sigma),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_airage(self.airairhorizons_depth1)-\\\n self.airairhorizons_sigma/2\n ystart = self.site2.fct_airage(self.airairhorizons_depth2)+\\\n self.airairhorizons_sigma/2\n for i in range(np.size(self.airairhorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.airairhorizons_sigma[i],\n -self.airairhorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0)\n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement',\n zorder=0)\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_air_synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()\n\n if self.site2.archive == 'icecore':\n if np.size(self.iceairhorizons_depth1)>0:\n fig, ax = mpl.subplots()\n if self.site1.archive == 'icecore':\n mpl.xlabel(self.site1.label+' ice age (yr b1950)')\n else:\n mpl.xlabel(self.site1.label+' age (yr b1950)')\n mpl.ylabel(self.site2.label+' air age (yr b1950)')\n if np.size(self.iceairhorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_age_init(self.iceairhorizons_depth1),\n self.site2.fct_airage_init(self.iceairhorizons_depth2),\n color=pccfg.color_init,\n linestyle='',\n marker='o', markersize=2, label=\"Initial\")\n mpl.plot(self.site1.fct_age_model(self.iceairhorizons_depth1),\n self.site2.fct_airage_model(self.iceairhorizons_depth2),\n color=pccfg.color_mod,\n linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_age(self.iceairhorizons_depth1),\n self.site2.fct_airage(self.iceairhorizons_depth2),\n color=pccfg.color_opt,\n xerr=np.zeros_like(self.iceairhorizons_sigma),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_age(self.iceairhorizons_depth1)-\\\n self.iceairhorizons_sigma/2\n ystart = self.site2.fct_airage(self.iceairhorizons_depth2)+\\\n self.iceairhorizons_sigma/2\n for i in range(np.size(self.iceairhorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.iceairhorizons_sigma[i],\n -self.iceairhorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0) \n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement',\n zorder=0)\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n if self.site1.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/ice_air_synchro.pdf')\n else:\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()\n\n if self.site1.archive == 'icecore':\n if np.size(self.airicehorizons_depth1)>0:\n fig, ax = mpl.subplots()\n mpl.xlabel(self.site1.label+' air age (yr b1950)')\n if self.site2.archive == 'icecore':\n mpl.ylabel(self.site2.label+' ice age (yr b1950)')\n else:\n mpl.ylabel(self.site2.label+' age (yr b1950)')\n if 
np.size(self.airicehorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_airage_init(self.airicehorizons_depth1),\n self.site2.fct_age_init(self.airicehorizons_depth2),\n color=pccfg.color_init,\n linestyle='', marker='o', markersize=2, label=\"Initial\")\n mpl.plot(self.site1.fct_airage_model(self.airicehorizons_depth1),\n self.site2.fct_age_model(self.airicehorizons_depth2),\n color=pccfg.color_mod,\n linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_airage(self.airicehorizons_depth1),\n self.site2.fct_age(self.airicehorizons_depth2),\n color=pccfg.color_opt,\n xerr=np.zeros_like(self.airicehorizons_sigma),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_airage(self.airicehorizons_depth1)-\\\n self.airicehorizons_sigma/2\n ystart = self.site2.fct_age(self.airicehorizons_depth2)+\\\n self.airicehorizons_sigma/2\n for i in range(np.size(self.airicehorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.airicehorizons_sigma[i],\n -self.airicehorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0)\n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement')\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n if self.site2.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_ice_synchro.pdf')\n else:\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()", "def ResBeam_Stats_plot(n, header_bmaj, header_bmin): \n\n file_dir = 'SpectralCube_BeamLogs'\n basename = '/beamlog.image.restored.' + imagebase + field\n\n # use different basename for the Milky Way range\n if not glob.glob(file_dir + basename +'*.txt'):\n basename = '/beamlog.image.restored.' + imagebase + 'MilkyWay.' 
+ field\n\n \n BEAM_THRESHOLD = []\n \n title1 = 'Restoring beam bmaj standard deviation [arcsec]'\n plt_name1 = 'BmajStdev.png'\n saved_fig1 = fig_dir+'/'+plt_name1\n\n title2 = 'Restoring beam bmin standard deviation [arcsec]'\n plt_name2 = 'BminStdev.png'\n saved_fig2 = fig_dir+'/'+plt_name2\n\n title3 = 'Maximum ratio of beam area'\n plt_name3 = 'max_ratioBA.png'\n saved_fig3 = fig_dir+'/'+plt_name3\n\n title4 = 'Minimum ratio of beam area' \n plt_name4 = 'min_ratioBA.png'\n saved_fig4 = fig_dir+'/'+plt_name4\n \n params = {'axes.labelsize': 10,\n 'axes.titlesize': 10,\n 'font.size':10}\n\n pylab.rcParams.update(params)\n\n beamXPOS, beamYPOS = BeamPosition()\n fig1, ax1 = plt.subplots()\n fig2, ax2 = plt.subplots()\n fig3, ax3 = plt.subplots()\n fig4, ax4 = plt.subplots()\n \n for i in range(0,36):\n bnum = n[i]\n infile = file_dir + basename +'.beam%02d.txt'%(bnum)\n bmaj_stdev, bmin_stdev, beam_threshold, max_ratio_BA, min_ratio_BA = cal_ResBeam_Stats(infile, header_bmaj, header_bmin)\n BEAM_THRESHOLD.append(beam_threshold)\n\n ax1.scatter([beamXPOS[i]], [beamYPOS[i]], s=1400, edgecolors='black', facecolors='none')\n ax1.text(beamXPOS[i], beamYPOS[i]+0.02, n[i], va='center', ha='center')\n ax1.text(beamXPOS[i], beamYPOS[i]-0.02, round(bmaj_stdev, 3), va='center', ha='center', fontsize=8, color='blue')\n\n ax2.scatter([beamXPOS[i]], [beamYPOS[i]], s=1400, edgecolors='black', facecolors='none')\n ax2.text(beamXPOS[i], beamYPOS[i]+0.02, n[i], va='center', ha='center')\n ax2.text(beamXPOS[i], beamYPOS[i]-0.02, round(bmin_stdev,3), va='center', ha='center', fontsize=8, color='blue')\n\n maxplot = ax3.scatter([beamXPOS[i]], [beamYPOS[i]], s=1300, c=[max_ratio_BA], cmap='summer', edgecolors='black', vmin=0, vmax=1.1)\n ax3.text(beamXPOS[i], beamYPOS[i]+0.02, n[i], va='center', ha='center')\n ax3.text(beamXPOS[i], beamYPOS[i]-0.02, round(max_ratio_BA,3), va='center', ha='center', fontsize=8, color='blue')\n \n minplot = ax4.scatter([beamXPOS[i]], [beamYPOS[i]], s=1300, c=[min_ratio_BA], cmap='summer', edgecolors='black', vmin=0, vmax=1.1)\n ax4.text(beamXPOS[i], beamYPOS[i]+0.02, n[i], va='center', ha='center')\n ax4.text(beamXPOS[i], beamYPOS[i]-0.02, round(min_ratio_BA,3), va='center', ha='center', fontsize=8, color='blue')\n \n ax1.set_xlim(0,0.7)\n ax1.set_ylim(0,1.4)\n ax1.tick_params(axis='both',which='both', bottom=False,top=False,right=False,left=False,labelbottom=False, labelleft=False)\n ax1.set_title(title1)\n\n ax2.set_xlim(0,0.7)\n ax2.set_ylim(0,1.4)\n ax2.tick_params(axis='both',which='both', bottom=False,top=False,right=False,left=False,labelbottom=False, labelleft=False)\n ax2.set_title(title2)\n\n ax3.set_xlim(0,0.7)\n ax3.set_ylim(0,1.4)\n ax3.tick_params(axis='both',which='both', bottom=False,top=False,right=False,left=False,labelbottom=False, labelleft=False)\n ax3.set_title(title3)\n plt.colorbar(maxplot, ax=ax3)\n\n ax4.set_xlim(0,0.7)\n ax4.set_ylim(0,1.4)\n ax4.tick_params(axis='both',which='both', bottom=False,top=False,right=False,left=False,labelbottom=False, labelleft=False)\n ax4.set_title(title4)\n plt.colorbar(minplot, ax=ax4)\n\n fig1.savefig(saved_fig1, bbox_inches='tight')\n fig2.savefig(saved_fig2, bbox_inches='tight')\n fig3.savefig(saved_fig3, bbox_inches='tight')\n fig4.savefig(saved_fig4, bbox_inches='tight')\n\n plt.close('all')\n\n return saved_fig1, saved_fig2, plt_name1, plt_name2, saved_fig3, saved_fig4, plt_name3, plt_name4, BEAM_THRESHOLD", "def boot_induvidual_plot(self): # Setting up induvidual plots\n self.plot_traits = 
list([self.plt_0.subplot2grid((2, 5), (0, 0)), self.plt_0.subplot2grid((2, 5), (0, 1)),\n self.plt_0.subplot2grid((2, 5), (0, 2)), self.plt_0.subplot2grid((2, 5), (0, 3)),\n self.plt_0.subplot2grid((2, 5), (0, 4))])\n\n # creatng list of plot objects\n\n for x in range(len(self.X_transp)): # Iterating over each attributes patient\n\n present=self.plot_traits[x]\n # Selecting a particular plot object\n present.set_facecolor('orange')\n # setting face color\n present.scatter(self.np_0.arange(len(self.list_patient_names)),self.X_transp[x],c='blue')\n # drawing a scatter plot of this attribute\n\n present.xaxis.set_major_locator(plt.MultipleLocator(1))\n\n present.set_xlabel('Patient ID', fontweight='bold')\n # setting X-LABEL\n present.set_ylabel(self.list_attributes[x], fontweight='bold')\n # setting Y-LABEL\n present.title.set_text(self.list_attributes[x]+\" Variation\")\n # setting Title\n\n present = self.plt_0.subplot2grid((2, 5), (1, 0), colspan=5)\n # to plot the present's status\n present.scatter(self.X_reduced_transp[0], self.X_reduced_transp[1], c='red')\n # plotting in the BOTTOM-PLOT\n\n present.set_xlabel(\"Principle Component -1\", fontweight='bold')\n # setting X-LABEL\n present.set_ylabel(\"Principle Component -2\", fontweight='bold')\n # setting Y-LABEL\n\n for x in range(len(self.list_patient_names)): # Naming each patient with ID\n self.list_patient_names[x] = \"Patient \" + str(x)\n # Eg: Patient 0,Patient 1...\n for i, txt in enumerate(self.list_patient_names): # This is used to enumerate the scatter plots label\n present.annotate(txt, (self.X_reduced_transp[0][i] + 1, self.X_reduced_transp[1][i]), fontsize=10, c='black')\n # Coonecting with present", "def PlotAirplane():\n airplane = vtkInterface.PolyData(planefile)\n airplane.Plot()", "def plot_studies(self):\n import matplotlib.pyplot as plt\n import seaborn as sns\n from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n ###\n #check if internships are available in the given file\n ###\n to_read = str(self.exchanged_offers_filepath.get())\n if not os.path.isfile(to_read):\n tkMessageBox.showwarning(title=\"File doesn't exist\",message=\"The filename or the location you entered does not exist!\")\n return None\n else:\n self.exchanged = pd.read_csv(to_read,sep=',',usecols=['domestic offer code','foreign offer code','country','field','min duration','max duration'])\n \n if self.exchanged.empty:\n tkMessageBox.showwarning(title=\"No available data\",\n message=\"No exchanged offers are available in the given file! 
Add some offers first and try again later\")\n return None\n else:\n ###\n #define study fields to use for plotting\n ###\n studies=['Bio','Environment','Mechanic','Electric','Physic','Civil','Chemi','Material','Computer','Architecture']\n \n ###\n #use pandas functionalities for the plots\n ### \n frequency = pd.DataFrame()\n for univ in studies:\n frequency[univ] = [len(self.check_study(self.exchanged,univ))]\n frequency = frequency.transpose()\n \n ###\n #make figure\n ###\n fig, ax = plt.subplots()\n frequency.sort_index().plot(ax=ax,kind='bar',figsize=(8,6))\n ax.tick_params(axis='both', labelsize=16)\n ax.set_xticklabels(ax.xaxis.get_ticklabels(),rotation=45)\n fig.tight_layout()\n\n ###\n #show figure in new tkinter window, and adjust window size to figure size\n ###\n figure_window_1 = tk.Toplevel()\n figure_window_1.title('Figure')\n #figure_window_1.geometry()\n #print()\n \n ###\n #create label to put figure in\n ###\n figure_canvas = FigureCanvasTkAgg(fig,master=figure_window_1)\n figure_canvas.get_tk_widget().grid(column=0,row=0)\n #pix_in_inch = figure_window_1.winfo_pixels('1i') #number of pixels in 1 inch\n #figure_canvas.get_tk_widget().geometry('{}x{}'.format(int(fig.get_figwidth()*pix_in_inch),\n # int(fig.get_figheight()*pix_in_inch)))", "def show_plot(self):\n label_1 = (self.own_name_1 + \"'s account\")\n label_2 = (self.own_name_2 + \"'s account\")\n clusters = 3\n counts_1 = (self.op_full_name_count_1, self.op_first_name_count_1, self.op_last_name_count_1)\n counts_2 = (self.op_full_name_count_2, self.op_first_name_count_2, self.op_last_name_count_2)\n fig, ax = plt.subplots()\n index = np.arange(clusters)\n bar_width = 0.2\n opacity = 0.5\n rects1 = plt.bar(index, counts_1, bar_width, alpha=opacity, color=\"b\", label=label_1)\n rects2 = plt.bar(index + bar_width, counts_2, bar_width, alpha=opacity, color=\"g\", label=label_2)\n #plt.xlabel(\"Name forms\")\n plt.ylabel(\"Number of references\")\n plt.title(\"Reference of opponents name\")\n plt.xticks(index + bar_width, (\"Opponent's Full Name\", \"Opponent's First Name only\", \"Opponent's Last name only\"))\n plt.legend()\n plt.tight_layout()\n plt.show()", "def show_custom_graph(self):\n pass", "def plot_ensembles(self, forecast=None, fhr=None, interpolate=True, domain=\"dynamic\", ax=None, cartopy_proj=None, save_path=None, **kwargs):\n\n # Pop kwargs\n prop_members = kwargs.pop('prop_members', {})\n prop_mean = kwargs.pop('prop_mean', {})\n prop_gfs = kwargs.pop('prop_gfs', {})\n prop_btk = kwargs.pop('prop_btk', {})\n prop_ellipse = kwargs.pop('prop_ellipse', {})\n prop_density = kwargs.pop('prop_density', {})\n map_prop = kwargs.pop('map_prop', {})\n\n # Create instance of plot object\n try:\n self.plot_obj\n except:\n self.plot_obj = TrackPlot()\n\n # -------------------------------------------------------------------------\n\n # Get forecasts dict saved into storm object, if it hasn't been already\n try:\n self.forecast_dict\n except:\n self.get_operational_forecasts()\n\n # Fetch latest forecast if None\n if forecast is None:\n inits = []\n for key in ['AC00', 'AP01', 'AP02', 'AP03', 'AP04', 'AP05']:\n if key in self.forecast_dict.keys():\n inits.append(dt.strptime(\n [k for k in self.forecast_dict[key]][-1], '%Y%m%d%H'))\n if len(inits) > 0:\n forecast = min(inits)\n else:\n raise RuntimeError(\n \"Error: Could not determine the latest available GEFS forecast.\")\n\n # Determine max members by year\n nens = 21\n if self.year >= 2020 and ('AP21' in self.forecast_dict.keys() or 'AP22' in 
self.forecast_dict.keys() or 'AP23' in self.forecast_dict.keys()):\n nens = 31\n\n # Enforce fhr type\n if isinstance(fhr, list):\n fhr = fhr[0]\n\n # If this forecast init was recently used, don't re-calculate\n init_used = False\n try:\n if self.gefs_init == forecast:\n init_used = True\n except:\n pass\n\n # Only calculate if needed to\n if not init_used:\n\n print(\"--> Starting to calculate ellipse data\")\n\n # Create dict to store all data in\n ds = {'gfs': {'fhr': [], 'lat': [], 'lon': [], 'vmax': [], 'mslp': [], 'time': []},\n 'gefs': {'fhr': [], 'lat': [], 'lon': [], 'vmax': [], 'mslp': [], 'time': [],\n 'members': [], 'ellipse_lat': [], 'ellipse_lon': []}\n }\n\n # String formatting for ensembles\n def str2(ens):\n if ens == 0:\n return \"AC00\"\n if ens < 10:\n return f\"AP0{ens}\"\n return f\"AP{ens}\"\n\n # Get GFS forecast entry (AVNX is valid for RAL a-deck source)\n gfs_key = 'AVNO' if 'AVNO' in self.forecast_dict.keys() else 'AVNX'\n try:\n forecast_gfs = self.forecast_dict[gfs_key][forecast.strftime(\n \"%Y%m%d%H\")]\n except:\n raise RuntimeError(\n \"The requested GFS initialization isn't available for this storm.\")\n\n # Enter into dict entry\n ds['gfs']['fhr'] = [int(i) for i in forecast_gfs['fhr']]\n ds['gfs']['lat'] = [np.round(i, 1) for i in forecast_gfs['lat']]\n ds['gfs']['lon'] = [np.round(i, 1) for i in forecast_gfs['lon']]\n ds['gfs']['vmax'] = [float(i) for i in forecast_gfs['vmax']]\n ds['gfs']['mslp'] = forecast_gfs['mslp']\n ds['gfs']['time'] = [forecast +\n timedelta(hours=i) for i in forecast_gfs['fhr']]\n\n # Retrieve GEFS ensemble data (30 members 2019-present, 20 members prior)\n for ens in range(0, nens):\n\n # Create dict entry\n ds[f'gefs_{ens}'] = {\n 'fhr': [],\n 'lat': [],\n 'lon': [],\n 'vmax': [],\n 'mslp': [],\n 'time': [],\n }\n\n # Retrieve ensemble member data\n ens_str = str2(ens)\n if ens_str not in self.forecast_dict.keys():\n continue\n if forecast.strftime(\"%Y%m%d%H\") not in self.forecast_dict[ens_str].keys():\n continue\n forecast_ens = self.forecast_dict[ens_str][forecast.strftime(\n \"%Y%m%d%H\")]\n\n # Enter into dict entry\n ds[f'gefs_{ens}']['fhr'] = [int(i)\n for i in forecast_ens['fhr']]\n ds[f'gefs_{ens}']['lat'] = [\n np.round(i, 1) for i in forecast_ens['lat']]\n ds[f'gefs_{ens}']['lon'] = [\n np.round(i, 1) for i in forecast_ens['lon']]\n ds[f'gefs_{ens}']['vmax'] = [\n float(i) for i in forecast_ens['vmax']]\n ds[f'gefs_{ens}']['mslp'] = forecast_ens['mslp']\n ds[f'gefs_{ens}']['time'] = [forecast +\n timedelta(hours=i) for i in forecast_ens['fhr']]\n\n # Construct ensemble mean data\n # Iterate through all forecast hours\n for iter_fhr in range(0, 246, 6):\n\n # Temporary data arrays\n temp_data = {}\n for key in ds['gfs'].keys():\n if key not in ['time', 'fhr']:\n temp_data[key] = []\n\n # Iterate through ensemble member\n for ens in range(nens):\n\n # Determine if member has data valid at this forecast hour\n if iter_fhr in ds[f'gefs_{ens}']['fhr']:\n\n # Retrieve index\n idx = ds[f'gefs_{ens}']['fhr'].index(iter_fhr)\n\n # Append data\n for key in ds['gfs'].keys():\n if key not in ['time', 'fhr']:\n temp_data[key].append(\n ds[f'gefs_{ens}'][key][idx])\n\n # Proceed if 20 or more ensemble members\n if len(temp_data['lat']) >= 10:\n\n # Append data\n for key in ds['gfs'].keys():\n if key not in ['time', 'fhr']:\n ds['gefs'][key].append(np.nanmean(temp_data[key]))\n ds['gefs']['fhr'].append(iter_fhr)\n ds['gefs']['time'].append(\n forecast + timedelta(hours=iter_fhr))\n 
ds['gefs']['members'].append(len(temp_data['lat']))\n\n # Calculate ellipse data\n if prop_ellipse is not None:\n try:\n ellipse_data = calc_ensemble_ellipse(\n temp_data['lon'], temp_data['lat'])\n ds['gefs']['ellipse_lon'].append(\n ellipse_data['ellipse_lon'])\n ds['gefs']['ellipse_lat'].append(\n ellipse_data['ellipse_lat'])\n except:\n ds['gefs']['ellipse_lon'].append([])\n ds['gefs']['ellipse_lat'].append([])\n else:\n ds['gefs']['ellipse_lon'].append([])\n ds['gefs']['ellipse_lat'].append([])\n\n # Save data for future use if needed\n self.gefs_init = forecast\n self.ds = ds\n\n print(\"--> Done calculating ellipse data\")\n\n # Determine lon bounds for cartopy projection\n proj_lons = []\n for key in self.ds.keys():\n proj_lons += self.ds[key]['lon']\n if fhr is not None and fhr in self.ds['gefs']['fhr']:\n fhr_idx = self.ds['gefs']['fhr'].index(fhr)\n proj_lons += self.ds['gefs']['ellipse_lon'][fhr_idx]\n\n # Create cartopy projection\n if cartopy_proj is not None:\n self.plot_obj.proj = cartopy_proj\n elif np.nanmax(proj_lons) > 150 or np.nanmin(proj_lons) < -150:\n self.plot_obj.create_cartopy(\n proj='PlateCarree', central_longitude=180.0)\n else:\n self.plot_obj.create_cartopy(\n proj='PlateCarree', central_longitude=0.0)\n\n # Account for cases crossing dateline\n ds = copy.deepcopy(self.ds)\n if np.nanmax(proj_lons) > 150 or np.nanmin(proj_lons) < -150:\n for key in ds.keys():\n new_lons = np.array(ds[key]['lon'])\n new_lons[new_lons < 0] = new_lons[new_lons < 0] + 360.0\n ds[key]['lon'] = new_lons.tolist()\n\n # Re-calculate GEFS mean\n for iter_hr in ds['gefs']['fhr']:\n fhr_idx = ds['gefs']['fhr'].index(iter_hr)\n ds['gefs']['lon'][fhr_idx] = np.nanmean([ds[f'gefs_{ens}']['lon'][ds[f'gefs_{ens}']['fhr'].index(\n iter_hr)] for ens in range(nens) if iter_hr in ds[f'gefs_{ens}']['fhr']])\n\n # Plot storm\n plot_ax = self.plot_obj.plot_ensembles(forecast, self.dict, fhr, interpolate, prop_members, prop_mean,\n prop_gfs, prop_btk, prop_ellipse, prop_density, nens, domain,\n ds, ax=ax, map_prop=map_prop, save_path=save_path)\n\n # Return axis\n return plot_ax", "def displayGraph(self):\n self.dto.displayVerticalGraph()\n print(\"Vertical Bar Graph displayed.\")", "def plot_plateau(x,y,p,n,Vdc):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('Vrf [V]')\n ax.set_ylabel('Current [nA]')\n fig.suptitle('Vdc = '+str(Vdc)+' n = '+str(n), fontsize=24)\n \n plt.plot(x,y,'x',label='Experimental data') \n t = np.linspace(min(x),max(x),1000)\n plt.plot(t,f(t,p[0],p[1],p[2]),label='Fit')\n plt.axhline(y=n*e*frequency*1e9, color='black', linestyle='-')\n\n ax.legend()\n plt.show(block=True)\n plt.pause(0.3)\n plt.close()\n \n return None", "def _init_plot(self) -> None:\n\n # create a grayscale plot\n out = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n hdu = self.image_generator.image(self.ra, self.dec)\n self.plot = aplpy.FITSFigure(hdu)\n self.plot.show_grayscale()\n self.plot.set_theme(\"publication\")\n sys.stdout = out\n\n # label for the position angle\n pa_string = \"PA = %.1f\" % self.mode_details.position_angle().to_value(u.deg)\n if self.mode_details.automated_position_angle():\n pa_string += \" (auto)\"\n self.draw_label(0.95, -0.05, pa_string, style=\"italic\", weight=\"bold\")\n\n # label for the title\n if self.title:\n self.draw_label(\n 0.5, 1.03, self.title, style=\"italic\", weight=\"bold\", size=\"large\"\n )\n\n # label for the image source\n self.draw_label(\n -0.05,\n -0.05,\n \"%s\" % self.image_generator.source(),\n style=\"italic\",\n 
weight=\"bold\",\n )\n\n # grid overlay\n self.plot.add_grid()\n self.plot.grid.set_alpha(0.2)\n self.plot.grid.set_color(\"b\")\n\n # indicate the RSS field of view\n self.draw_circle(self.ra, self.dec, 4.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.79,\n 0.79,\n \"RSS\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # indicate the Salticam field of view\n self.draw_circle(self.ra, self.dec, 5.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.86,\n 0.86,\n \"SCAM\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # labels for north and east direction\n self.draw_label(\n self.ra,\n self.dec + 4.8 * u.arcmin,\n \"N\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n color=(0, 0.5, 1),\n )\n self.draw_label(\n self.ra + 4.8 * u.arcmin / np.abs(np.cos(self.dec)),\n self.dec,\n \"E\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"right\",\n color=(0, 0.5, 1),\n )\n\n # add cross hairs\n self.draw_centered_line(\n 0 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n self.draw_centered_line(\n 90 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n\n # label for the magnitude range and bandpass\n if self.magnitude_range:\n self._show_magnitudes()\n\n # add mode specific content\n if not self.basic_annotations:\n self.mode_details.annotate_finder_chart(self)", "def draw_easy_plot(terminate=True):\r\n print_info('begin', 'draw a eazy plot to check the connection with remote server.')\r\n x = np.linspace(-1, 1, 50)\r\n y = 2 * x + 1\r\n plt.plot(x, y)\r\n plt.show()\r\n print_info()\r\n # terminate the program\r\n exit() if terminate else None # 不行就换这个:terminate and exit()\r", "def plot(self):\n fig, ax = plt.subplots()\n ticklabels = [item.strftime('%b %d') for item in self.series.index]\n ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))\n\n plt.ylabel('#Cases')\n i = 0\n for y in self.countries:\n plt.plot(ticklabels, self.series[y], GRAPH_FORMATS[i], label=y)\n i += 1\n ax.set_xticklabels(ticklabels, rotation='vertical', fontsize=10)\n plt.legend()\n plt.grid()\n if self.log:\n plt.yscale(\"log\")\n plt.show()", "def plot(var):\n # MISSCHIEN KUNNEN WE HIER NOG IETS MEE\n # total_dead = len(train_data[\"Survived\"] == 0)\n # total_survived = len(train_data[\"Survived\"] == 1)\n # died = train_data[train_data[\"Survived\"] == 0][var].value_counts() / total_dead\n # survived = train_data[train_data[\"Survived\"] == 1][var].value_counts() / total_survived\n sns.set()\n sns.set_color_codes(\"pastel\")\n\n # order bars for family size variable\n if var == \"FamSize\":\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=.7, order=[\"alone\", 1, 2, 3, \"4 or more\"]).\\\n tick_params(labelsize=18)\n else:\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=1.1).tick_params(labelsize=18)\n\n # plot style properties\n ax = plt.gca()\n\n for ax in plt.gcf().axes:\n x = ax.get_xlabel()\n y = ax.get_ylabel()\n ax.set_xlabel(x, fontsize=20)\n ax.set_ylabel(y, fontsize=20)\n\n plt.title(\"Ratio of survivors for variable \" + str(var), fontsize=22)\n t = ax.title\n t.set_position([.5, 1.05])\n plt.ylim([0, 1])\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/survived_\" + str(var) + \".png\", bbox_inches=\"tight\")\n\n plt.show()", 
"def plot_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2014/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\t#labels = ['no APMB', 'APMB']\n\t#if labels == '':\n\tlabels = steps\n\tdeep = {}\n\t#uzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\t\tprint(pointsFile)\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\t'''\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t#normalize\n\tuz_fem = uz_fem / uzmax\n\tur_fem = ur_fem / uzmax\n\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\t'''\n\n\t# Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / D]')\n\tplt.ylabel('Normalized Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''", "def force_draw(self):\n import matplotlib.pyplot as plt\n\n plt.show()", "def plot_Hubble():\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n z = SN_data[\"SNZ\"]\n mu = SN_data['SNMU']\n mu_err = SN_data['SNMU_ERR']\n z_array = np.linspace(0.0, 1.5 + 0.01, 1001)\n mu_cosm = 5 * np.log10((1 + z_array) * Convergence.comoving(z_array, OM=0.25, OL=0.75, 
h=0.7) * 1000) + 25\n mu_diff = SN_data['mu_diff']\n ax = plt.subplot2grid((2, 1), (0, 0))\n ax2 = plt.subplot2grid((2, 1), (1, 0))\n ax.set_ylabel(\"$\\mu$\")\n ax2.set_xlabel(\"$z$\")\n ax2.set_ylabel(\"$\\Delta\\mu$\")\n plt.subplots_adjust(wspace=0, hspace=0)\n ax.set_xticklabels([])\n ax.tick_params(labelsize=12)\n ax.errorbar(z[::2], mu[::2], mu_err[::2], linestyle='', linewidth=0.8, marker='o',\n markersize=2, capsize=2, color='C3', zorder=0, alpha=0.6, elinewidth=0.7)\n ax.plot(z[::2], mu[::2], linestyle='', marker='o', markersize=2, color='C3', alpha=0.4, markerfacecolor='C3')\n\n ax.set_ylim([38.5, 46])\n ax.set_xlim([0, 1.5])\n ax.plot(z_array, mu_cosm, linestyle='--', linewidth=0.8, color='C0', zorder=10)\n ax2.errorbar(z[::2], mu_diff[::2], mu_err[::2], linestyle='', linewidth=1, marker='o',\n markersize=2, capsize=2, color='C3', zorder=0, alpha=0.6, elinewidth=0.7)\n ax2.plot(z[::2], mu_diff[::2], linestyle='', marker='o', markersize=2, color='C3', alpha=0.4, markerfacecolor='C3')\n ax2.plot(z_array, np.zeros(len(z_array)), zorder=10, color='C0', linewidth=0.8, linestyle='--')\n ax2.set_ylim(-1.0, 1.0)\n ax2.set_xlim([0, 1.5])\n ax2.tick_params(labelsize=12)\n\n plt.show()", "def showPlot2():\n raise NotImplementedError", "def finalize_plot(self, artifact_name, attacker_x=None, attacker_y=None):\n # Plot the axis ticks.\n plt.ylim((self.min_y - 10.0, self.max_y + 10.0))\n plt.xlim((self.min_x - 10.0, self.max_x + 10.0))\n plt.xticks([self.min_x + 1000, 0.0, self.max_x], size=15)\n plt.yticks([self.min_y + 1000, 0.0, self.max_y], size=15)\n # Add and place the labels.\n ax = plt.gca()\n plt.ylabel(\"Crossrange (ft)\", size=15)\n plt.xlabel(\"Downrange (ft)\", size=15)\n plt.subplots_adjust(bottom=0.25, left=0.25)\n ax.yaxis.set_label_coords(-0.1, 0.5)\n # Place the plane.\n plane = plt.imread(\"plane.png\").transpose((1, 0, 2))\n width = (self.max_x - self.min_x) / 10\n height = (496.0 / 499.0) * width\n x_start = -(width / 2.0)\n y_start = -(height / 2.0)\n plt.imshow(plane, extent=[x_start, x_start + width,\n y_start, y_start + height], zorder=100)\n plane = np.flip(plane, 1)\n if attacker_x is None:\n attacker_x = self.max_x - (2 * width)\n if attacker_y is None:\n attacker_y = self.max_y - (2 * height)\n red_plane = self.color_plane_png(plane, [1.0, 0, 0], True)\n plt.imshow(red_plane, zorder=100,\n extent=[attacker_x, attacker_x + width,\n attacker_y, attacker_y + height])\n self.record_artifact(plt, artifact_name, \"matplotlib\")\n plt.clf()", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n df.drop(df.columns[6], axis=1, inplace=True)\n\n if len(context) > 3:\n context = context.lower().capitalize()\n df = df.loc[df[\"Country,Other\"] == context]\n if 4 > len(context) > 1:\n context = context.upper()\n df = df.loc[df[\"Country,Other\"] == context]\n if len(context) <= 1:\n df = df[1:]\n\n C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n T_Recovered = 
df[\"TotalRecovered\"].head(n=10).values.tolist()\n T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n x = np.arange(len(C_Names))\n width = 0.20\n\n fig, ax = plt.subplots()\n\n ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color=\"Blue\")\n ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n if len(context) > 1:\n ax.set_title(\"{}'s Situation\".format(context))\n else:\n ax.set_title(\"World's Top10 Situation\")\n\n ax.set_xticks(x)\n ax.set_xticklabels(C_Names)\n ax.legend()\n plt.ticklabel_format(style='plain', axis=\"y\")\n fig.set_size_inches(18.5, 10.5)\n fig.tight_layout()\n plt.grid()\n\n if len(context) > 1:\n font1 = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'bold',\n 'size': 20}\n font2 = {'family': 'serif',\n 'color': 'red',\n 'weight': 'normal',\n 'size': 20}\n font3 = {'family': 'serif',\n 'color': 'green',\n 'weight': 'normal',\n 'size': 20}\n font4 = {'family': 'serif',\n 'color': 'orange',\n 'weight': 'normal',\n 'size': 20}\n\n # bbox=dict(facecolor='black', alpha=0.5)\n plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n # plt.savefig('corona.png') # Uncomment it to save the figure\n plt.show()", "def plot_covid():\n\n norway, usa = get_covid()\n _plot_covid(norway[0], norway[1], \"Norway\", \"12 Mar 2020\", \"#636efa\", \"#ef553b\")\n _plot_covid(usa[0], usa[1], \"USA\", \"22 Mar 2020\", \"#ef553b\", \"#636efa\")\n\n plot_relative_covid(norway, usa)", "def plot(self, **kwargs):\n\n from ..plot import Plot\n\n p = Plot(1, 1, 1, **kwargs)\n\n p.axes[0].plot(self.dispersion.value, self.flux.value,\n drawstyle='steps-mid')\n\n if self.flux.uncertainty is not None:\n p.axes[0].plot(self.dispersion.value, self.flux.uncertainty.value,\n drawstyle='steps-mid')\n\n p.tidy()\n p.display()", "def present_graph(data, perron_frobenius, page_rank, degree_distribution):\n info = [\n {\n 'data': data[::-1],\n 'title': 'Excitation Development via Adjacency-Matrix Multiplication',\n 'rel_height': 6\n },\n {\n 'data': np.array([perron_frobenius]),\n 'title': 'Perron-Frobenius Eigenvector',\n 'rel_height': 1\n }#,\n# {\n# 'data': np.array([page_rank]),\n# 'title': 'Pagerank',\n# 'rel_height': 1\n# },\n# {\n# 'data': np.array([degree_distribution]),\n# 'title': 'Degree Distribution',\n# 'rel_height': 1\n# }\n ]\n\n dimen = 20\n\n hm = plt.subplot2grid((dimen,dimen), (0,0), rowspan=17, colspan=19)\n pf = plt.subplot2grid((dimen,dimen), (18,0), rowspan=5, colspan=19)\n cb = plt.subplot2grid((dimen,dimen), (0,19), rowspan=20)\n\n for entry, ax in zip(info, [hm, pf]):\n hm = ax.pcolor(entry['data'], cmap=cm.gray, vmin=0, vmax=1)\n\n ax.set_title(entry['title'], fontsize=23)\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n\n pcb = plt.colorbar(hm, cax=cb)\n pcb.ax.tick_params(labelsize=20)\n\n \"\"\"hm = plt.subplot2grid((3,3), (0,0), rowspan=2, colspan=2)\n pf = plt.subplot2grid((3,3), (2,0), colspan=2)\n cb = plt.subplot2grid((3,3), (0, 2), 
rowspan=3)\n\n h = hm.pcolor(data[::-1], cmap=cm.gray, vmin=-0.1, vmax=1)\n pf.pcolor(np.array([perron_frobenius]), cmap=cm.gray, vmin=-0.1, vmax=1)\n plt.colorbar(h, cax=cb)\"\"\"\n\n Plotter.show('overview')", "def graph():\n fp = mpl.font_manager.FontProperties(family='JasmineUPC',size=24)\n x = np.arange(0,10)\n y = [386557057065, 368368395622, 242451971944, 225960095934, 161573560379, 107461232731, 89784502211, 73749349545, 54525219632, 52864743212]\n name = ['เชื้อเพลิงที่ได้จากแร่', 'เครื่องจักรและส่วนประกอบ', 'ยานยนต์และส่วนประกอบ', 'เครื่องอุปกรณ์ไฟฟ้าและส่วนประกอบ', 'เหล็กและเหล็กกล้า', 'พลาสติกและของทำด้วยพลาสติก', 'ของทำด้วยเหล็กหรือเหล็กกล้า', 'ทองแดงละของทำด้วยทองแดง', 'เคมีภัณฑ์เบ็ดเตล็ด', 'อุปกรณ์ที่ใช้ทางทัศนศาสตร์']\n ax = plt.gca(xticks=x)\n ax.set_xticklabels(name,rotation=1000,fontproperties=fp)\n plt.bar(x,y,color='g')\n plt.show()", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def main():\n \n \"\"\" Download and load data\"\"\"\n dfs = get_data()\n \n \"\"\" Preprocess data, combine rows for country provinces\"\"\"\n combine_list = [\"Australia\", \"US\", \"Canada\", \"Mainland China\", \"China\"]\n for key in dfs.keys():\n dfs[key] = preprocess(df=dfs[key], combine_list=combine_list)\n \n \"\"\" Compute additional variables\"\"\"\n dfs = compute_deaths_over_closed(dfs)\n dfs = compute_active_cases(dfs)\n dfs = compute_death_rate(dfs)\n dfs = compute_df_reindexed(dfs, \"active_cases\")\n dfs = compute_df_reindexed(dfs, \"death_rate\")\n \n \"\"\"Remove 0 and 1 from rate variables\"\"\"\n for keys in [\"death_rate\", \"death_rate_reindexed\", \"deaths_over_closed\"]:\n dfs[keys] = remove_corner_values(dfs[keys])\n \n \"\"\" Set parameters for plotting\"\"\"\n titles = {\"active_cases\": \"COVID-19 Active Cases\", 
\"active_cases_reindexed\": \"COVID-19 Active Cases (Days from the Start of the Outbreak)\", \"deaths_over_closed\": \"COVID-19 Deaths over (Deaths + Recovered)\", \"death_rate\": \"COVID-19 Death Rate\", \"death_rate_reindexed\": \"COVID-19 Death Rate (Days from the Start of the Outbreak)\"}\n filenames = {\"active_cases\": \"covid19_active.png\", \"active_cases_reindexed\": \"covid19_active_ri.png\", \"deaths_over_closed\": \"covid19_death_over_closed.png\", \"death_rate\": \"covid19_death_rate.png\", \"death_rate_reindexed\": \"covid19_death_rate_ri.png\"}\n row_inclusion_index_threasholds = {\"active_cases\": 770, \"active_cases_reindexed\": 500, \"deaths_over_closed\": 770, \"death_rate\": 770, \"death_rate_reindexed\": 500}\n row_inclusion_indices = {}\n #row_inclusion_indices.get(x) is None:\n # row_inclusion_indices = dfs[\"cases\"].iloc[:,-1] > x\n\n \"\"\" Plot\"\"\"\n for key in row_inclusion_index_threasholds.keys():\n row_inclusion_indices[key] = dfs[\"cases\"].iloc[:,-1] > row_inclusion_index_threasholds[key]\n if key in [\"active_cases_reindexed\", \"death_rate_reindexed\"]:\n row_inclusion_indices[key] = dfs[\"cases\"].iloc[:,-5] > row_inclusion_index_threasholds[key]\n plot(dfs[key], row_inclusion_indices.get(key), titles[key], filenames[key])", "def plot_network_azi(stadict):\n for key in stadict.keys():\n data=np.array(stadict[key])\n text=\"Mean %.2f - Std %.2f\\nMedian %.2f\" % (np.mean(data[:,1]),np.std(data[:,1]),np.median(data[:,1]))\n plt.figure()\n plt.subplot(211)\n plt.plot_date(data[:,0],data[:,1])\n plt.figtext(.6,.8,text)\n plt.ylabel('Offset (degrees)')\n plt.subplot(212)\n plt.plot_date(data[:,0],data[:,2])\n plt.ylabel('Linearity') \n plt.savefig(\"Azimuth_%s.png\" % (key))\n plt.close()", "def visualize(self):\n self.dataFrame.hist()\n plt.show()", "def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()", "def plotBarPlots(self, stage, strain, strainID, count, p, xm, ym, xn, yn):\n\t\t\n\t\ttimeList = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n\t\t\n\t\tind = np.arange(len(timeList))\n\t\t\n\t\tplt.style.use('bmh')\n\t\tcolors = 
['#b2182b', '#238b45', '#3690c0', '#023858']\n\t\t\n\t\tfig = plt.figure(figsize=(15,15), frameon=False)\n\t\tax1 = fig.add_subplot(111)\n\t\tfig.suptitle(strain + ' - ' + stage, fontsize=20, fontweight='bold')\n\t\n\t\tax1.spines['top'].set_visible(False)\n\t\tax1.spines['right'].set_visible(False)\n\t\tax1.spines['bottom'].set_visible(False)\n\t\tax1.spines['left'].set_visible(False)\n\t\tax1.yaxis.set_ticks_position('left')\n\t\tax1.xaxis.set_ticks_position('bottom')\n\t\twidth = 0.6\n\t\t\n\t\tax1.bar(ind+width/2., count[timeList,1], width, color=colors[0], edgecolor = \"none\")\n\t\t\n\t\tax1.plot(*p.linspace(), c='k', linewidth=3.0)\n\t\t\n\t\tfor point in range(len(xm)):\n\t\t\tax1.plot(xm[point], ym[point], marker='*', markersize=30, color=\"blue\")\n\t\tfor point in range(len(xn)):\n\t\t\tax1.plot(xn[point], yn[point], marker='*', markersize=30, color=\"blue\")\t\t\t\n\t\t\n\t\tax1.set_ylabel('Frequency as %', fontweight='bold', fontsize=20)\n\t\tax1.set_xlabel('Time', fontweight='bold', fontsize=20)\n\t\txTickMarks = ['%s' %str(j) for j in timeList]\n\t\tax1.set_xticks(ind+width/2)\n\t\txtickNames = ax1.set_xticklabels(xTickMarks, fontweight='bold')\n\t\t\n\t\tplt.setp(xtickNames, fontsize=15)\n\t\tax1.xaxis.set_ticks_position('none')\n\t\tax1.yaxis.set_ticks_position('none')\t\t\n\t\tax1.set_yticklabels(ax1.get_yticks(), fontweight='bold', fontsize=17)\n\t\tax1.set_xlim([6, len(timeList)-6])\n\t\t\t\t\t\n\t\tfname = 'strain%d' %strainID + stage + '.png' \n\t\t\n\t\tfig.savefig(fname, transparent=True, dpi=100)\n\t\tplt.close(fig)", "def plot_model(self):\n \n plt.figure(figsize=[10,5])\n \n plt.scatter(self.receivers['recxs'],self.receivers['reczs'],marker='v')\n if self.source['src_type']==4:\n from obspy.imaging.beachball import beach\n beach = beach(self.source['mt'], xy=(self.source['srcx'],self.source['srcz']), width=self.model_parameters['xmax']*0.05)\n ax = plt.gca()\n \n ax.add_collection(beach) \n ax.set_aspect(\"equal\")\n \n else:\n plt.scatter(self.source['srcx'],self.source['srcz'],marker='*',color='r',s=200)\n \n plt.axhline(y=0,c='0.5')\n plt.xlim(0,self.model_parameters['xmax'])\n plt.ylim(self.model_parameters['zmax'],-0.1*self.model_parameters['zmax'])\n \n plt.xlabel('Distance (km)')\n plt.ylabel('Depth (km)')\n plt.grid()\n plt.show()", "def main():\n\t#print(scipy.__version__)\n\t#image()\n\t#heat_capacity2()\n\t#hist()\n\t#single_plot()\n\n\t#heat_capacity2()\n\t#single_plot()\n\t#plt.show()\n\t#u0_tc()\n\t#multi_heat_capacity(\"HL_DM_flux5\",True)\n\t#multi_heat_capacity2()\n\t#plot_spin()\n\t#plt.show()\n\theat_capacity2(1,2)\n\t#hist()\n\tplt.show()\n\t#potential()\n\t#plt.show()\n\t#heat_capacity(3,4)\n\t#heat_capacity(5,6)\n\t#heat_capacity(7,8)\n\t#final_spins()\n\t#plot_spin()\n\t#plot_from_csv()\n\t#difference_plot()", "def plot_countries(self):\n import matplotlib.pyplot as plt\n import seaborn as sns\n from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n ###\n #check if internships are available in the given file\n ###\n to_read = str(self.exchanged_offers_filepath.get())\n if not os.path.isfile(to_read):\n tkMessageBox.showwarning(title=\"File doesn't exist\",message=\"The filename or the location you entered does not exist!\")\n return None\n else:\n self.exchanged = pd.read_csv(to_read,sep=',',usecols=['domestic offer code','foreign offer code','country','field','min duration','max duration'])\n \n if self.exchanged.empty:\n tkMessageBox.showwarning(title=\"No available data\",\n message=\"No exchanged offers are available in the 
given file! Add some offers first and try again later\")\n return None\n else:\n ###\n #use pandas functionalities for the plots\n ### \n frequency = pd.DataFrame() \n for country in self.exchanged['country'].unique():\n frequency[country] = [len(self.check_country(self.exchanged,country))]\n frequency = frequency.transpose()\n frequency.columns=['values']\n \n ###\n #making figure\n ###\n fig, ax = plt.subplots(figsize=(4,14))\n frequency.sort_values(by='values').plot(ax=ax,kind='barh',figsize=(4,10))\n ax.tick_params(axis='both', labelsize=16)\n fig.tight_layout()\n\n ###\n #show figure in new tkinter window\n ###\n figure_window_2 = tk.Toplevel()\n figure_window_2.title('Figure')\n \n ###\n #create label to put figure in\n ###\n figure_canvas = FigureCanvasTkAgg(fig,master=figure_window_2)\n figure_canvas.get_tk_widget().grid(column=0,row=0)", "def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def plot_solution(self):\n\n plt.plot(self.x_values, self.analytical(self.x_values, self.C,self.D), label = \"Analytical\")\n plt.plot(self.x_values, self.numerical, label = \"Numerical\")\n plt.title(\"Numerical vs. Analytical Solution\")\n plt.xlabel(\"x\")\n plt.ylabel(\"u(x)\")\n plt.legend()\n plt.show()" ]
[ "0.66557336", "0.6585414", "0.6435863", "0.6365141", "0.61312085", "0.60789055", "0.6042521", "0.6041088", "0.60108477", "0.6002608", "0.59909225", "0.597029", "0.59550214", "0.5904745", "0.5902543", "0.5886251", "0.58612454", "0.5829464", "0.5781532", "0.5781532", "0.5781532", "0.5781111", "0.5755519", "0.5743691", "0.5709254", "0.56876194", "0.5683656", "0.56702167", "0.56475496", "0.56443137", "0.5640885", "0.5640361", "0.56297356", "0.5621056", "0.56145066", "0.5606037", "0.5603925", "0.55936754", "0.55932015", "0.5577484", "0.5577162", "0.5567463", "0.5560573", "0.55581933", "0.5556394", "0.55530035", "0.55292445", "0.55228263", "0.5519561", "0.55182946", "0.5517243", "0.5511874", "0.550455", "0.55044246", "0.5495088", "0.54944295", "0.54944265", "0.5487868", "0.5485897", "0.54845166", "0.5476917", "0.5473604", "0.5468409", "0.5466902", "0.54641163", "0.54629827", "0.5459756", "0.54518706", "0.5451519", "0.54433304", "0.54367846", "0.5433868", "0.54327685", "0.5403014", "0.5399342", "0.53942454", "0.53859514", "0.5382408", "0.5376448", "0.5374062", "0.5372247", "0.5369109", "0.53675306", "0.5357087", "0.5354361", "0.53510165", "0.53438216", "0.5330138", "0.53288096", "0.53285587", "0.53283787", "0.5328241", "0.5319635", "0.5315999", "0.53158814", "0.5307261", "0.53056514", "0.5305447", "0.53049403", "0.5300795", "0.5295174" ]
0.0
-1
Function that compares the intervention (current board state) with the reference (initial board state) and stores the differences between the two, both as absolute values and as percentages.
def compare(self):
    self.PotTax_increase = self.PotTax_intervention - self.PotTax_reference
    self.PotTax_percentage = (
        (self.PotTax_increase / self.PotTax_reference) * 100)
    """
    # this sets the PotTax_percentage to actual percentages.
    self.PotTax_percentage['TFI'] = pd.Series(
        ["{0:.2f}%".format(val * 100) for val in self.PotTax_percentage['TFI']],
        index = self.PotTax_percentage.index)
    """
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_change(self, current, previous):\n current = float(current)\n previous = float(previous)\n if current == previous:\n return 0\n try:\n r = (abs(current - previous) / previous) * 100\n if r > 100:\n r = 100\n return round(r)\n except ZeroDivisionError:\n return 100", "def mobility(self, board):\n valid_moves_computer = sum(sum(self.game.find_valid_moves(self.computer_color, board, self.board_size)))\n valid_moves_opponent = sum(sum(self.game.find_valid_moves(self.opponent_color, board, self.board_size)))\n\n if valid_moves_computer + valid_moves_opponent == 0:\n return 0\n else:\n return 100 * (valid_moves_computer - valid_moves_opponent) / (valid_moves_computer + valid_moves_opponent)", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def evaluate(self):\n # if player has no move, then player lost, -inf or inf depend on who the player is\n # if player has moves, use heuristics.\n \n #checkColorMoves = self.getAvailableMoves(self.colorIndex)\n #otherColorMoves = self.getAvailableMoves(1-self.colorIndex)\n \n checkColorMoves = self.getAvailableMovesPreferLonger(self.colorIndex)\n otherColorMoves = self.getAvailableMovesPreferLonger(1-self.colorIndex)\n\n checkColorPieces = self.getPieceCount(self.colorIndex)\n otherColorPieces = self.getPieceCount(1-self.colorIndex)\n\n #checkColorEdgePieces = self.getEgdePieceCount(self.colorIndex)\n #otherColorEdgePieces = self.getEgdePieceCount(1-self.colorIndex)\n\n if self.player == 'computer':\n if checkColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n elif otherColorMoves == 0: #user doesn't have moves\n return float('inf')\n else:\n #return checkColorPieces - otherColorPieces\n return checkColorMoves - otherColorMoves\n else:\n if checkColorMoves == 0: #user doesn't have moves\n return float('inf')\n elif otherColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n else:\n #return otherColorPieces - checkColorPieces\n return otherColorMoves - checkColorMoves", "def test_numprops_different_pct(self):\n # Perform diff.\n minus, plus = 10, 20\n df = Differ(\n key=\"name\", deltas={\"energy\": Delta(\"+{}-{}=%\".format(plus, minus))}\n )\n d = df.diff(*self.engines)\n\n # Calculate expected results.\n def is_different(a, b):\n pct = 100.0 * (b - a) / a\n return pct <= -minus or pct >= plus\n\n changed = sum((int(is_different(e[0], e[1])) for e in self.energies))\n\n # Check results.\n if len(d[Differ.CHANGED]) != changed:\n result = d[Differ.CHANGED]\n msg = \"Values:\\n\"\n for i, e in enumerate(self.energies):\n if not is_different(*e):\n continue\n msg += \"{:d}) {:f} {:f}\\n\".format(i, e[0], e[1])\n msg += \"Result:\\n\"\n for i, r in enumerate(result):\n msg += \"{:d}) {} {}\\n\".format(i, r[\"old\"], r[\"new\"])\n self.assertEqual(len(d[Differ.CHANGED]), changed, msg=msg)", "def relative_change(nr1, nr2):\n\n return float(((nr2 - nr1) / nr1) * 100)", "def getDiffPercent(path, path2 ):\n global ans\n ans = []\n img = Image.open( path ) \n img2 = Image.open( path2 )\n\n width, height = img.size\n width2, height2 = img2.size\n \n diff = 0\n k = 0\n\n for i in range(width): \n for j in range(height):\n rgb = img.load()[i,j]\n rgb2 = img2.load()[i,j]\n \n if( rgb[0] == rgb2[0] and rgb[1] == rgb2[1] and rgb[2] == rgb2[2] and rgb[0] == 0 and rgb[1] == 0 and rgb[2] == 0 ):\n k = k+1\n if( rgb[0] == rgb2[0] and rgb[1] == rgb2[1] and rgb[2] == rgb2[2] and rgb[0] == 255 and rgb[1] == 
255 and rgb[2] == 255 ):\n k = k+1 \n \n diff = diff + pixelDiff(rgb, rgb2)\n\n img.close()\n img2.close()\n \n mx = 3 * 255 * ( width * height - k)\n return 100*diff/mx", "def calculatePercentChange(self, oldValue, newValue):\n return (((newValue - oldValue)/oldValue)*100)", "def evaluer(self):\n \n WhiteScore=0\n BlackScore=0\n \n # Parsing the board squares from 0 to 63\n for pos1,piece in enumerate(self.cases):\n\n # Material score\n if(piece.couleur=='blanc'):\n WhiteScore+=piece.valeur\n else: \n # NB : here is for black piece or empty square\n BlackScore+=piece.valeur\n\n if(self.side2move=='blanc'):\n return WhiteScore-BlackScore\n else:\n return BlackScore-WhiteScore", "def image_diff_percent(image_a, image_b):\n\n # if paths instead of image instances where passed in\n # load the images\n if isinstance(image_a, str):\n image_a = Image.open(image_a)\n\n if isinstance(image_b, str):\n image_b = Image.open(image_b)\n\n # first determine difference of input images\n input_images_histogram_diff = image_diff(image_a, image_b)\n\n # to get the worst possible difference use a black and a white image\n # of the same size and diff them\n\n black_reference_image = Image.new('RGB', image_a.size, (0, 0, 0))\n white_reference_image = Image.new('RGB', image_a.size, (255, 255, 255))\n\n worst_bw_diff = image_diff(black_reference_image, white_reference_image)\n\n percentage_histogram_diff = (input_images_histogram_diff / float(worst_bw_diff)) * 100\n\n return percentage_histogram_diff", "def compute_drift_score(ref_col_prob, col_prob):\n\n return sum(abs(np.asarray(ref_col_prob) - np.array(col_prob)) * 100)", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n return (curPrice - closePrice)/closePrice * 100", "def outcome(self):\n if self.grid[0][0] == self.grid[1][0] == self.grid[2][0] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[0][1] == self.grid[1][1] == self.grid[2][1] and self.grid[0][1] != 0:\n return self.grid[0][1]\n if self.grid[0][2] == self.grid[1][2] == self.grid[2][2] and self.grid[0][2] != 0:\n return self.grid[0][2]\n if self.grid[0][0] == self.grid[0][1] == self.grid[0][2] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[1][0] == self.grid[1][1] == self.grid[1][2] and self.grid[1][0] != 0:\n return self.grid[1][0]\n if self.grid[2][0] == self.grid[2][1] == self.grid[2][2] and self.grid[2][0] != 0:\n return self.grid[2][0]\n if self.grid[0][0] == self.grid[1][1] == self.grid[2][2] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[0][2] == self.grid[1][1] == self.grid[2][0] and self.grid[0][2] != 0:\n return self.grid[0][2]\n return 0", "def report_result(force_a_before, force_b_before, force_a_after, force_b_after):\n damage_a = 0.0\n damage_b = 0.0\n ################################# YOUR CODE HERE #################################\n damage_a = calculate_training_cost(force_a_before) - calculate_training_cost(force_a_after)\n damage_b = calculate_training_cost(force_b_before) - calculate_training_cost(force_b_after)\n ##################################################################################\n return damage_a, damage_b", "def get_diff_and_percentage(self, first, second, state):\n difference = first - second\n per_difference = (difference / second) * 
100\n total_percentage = (first / self.populations[state]) * 100\n return [difference, per_difference, total_percentage]", "def compareTo(self,imagefullpath):\n exc = ExtractColor2(self.k)\n bgrcolor = exc.getColorBGR(imagefullpath)\n\n score = 0\n for i in range(self.k):\n score += np.linalg.norm(bgrcolor[i] - self._ref_BGRcolor[i])/(np.sqrt(255*255*3))\n score /= self.k\n return 1 - score", "def calc_diffs(self, y, x, locs):\n res = {}\n \n for item, value in locs.iteritems():\n res[item] = self.slab_ratio * (self.grid[y, x] - self.grid[value['y'], value['x']])\n \n return res", "def rough_outcome(self) -> float:\n\n if self.p1_turn:\n name = '2'\n else:\n name = '1'\n\n count = 0\n for i in self.claim:\n if i == name:\n count += 1\n over = (self.get_possible_moves() == []) or \\\n (count >= 0.5 * len(self.claim))\n\n result = []\n if over:\n return -1\n else:\n for move in self.get_possible_moves():\n new_state = self.make_move(move)\n if new_state.rough_outcome() == -1:\n result.append(1)\n else:\n result.append(0)\n if 1 in result:\n return 1\n return -1", "def goalDiff(state):\n xdiff = state.goalX - state.posX\n ydiff = state.goalY - state.posY\n return (xdiff, ydiff)", "def _calc_and_display_moves(self):\n az_now, alt_now = u.calc_az_alt(self.locked_longitude_degrees, self.locked_latitude_degrees,\n self.plate_ra_degrees, self.plate_dec_degrees, self.image_datetime)\n az_occ, alt_occ = u.calc_az_alt(self.locked_longitude_degrees, self.locked_latitude_degrees,\n self.locked_target_ra_degrees, self.locked_target_dec_degrees,\n self.locked_occ_datetime)\n # Cast into range [-180, +180]:\n if az_occ < az_now - 180:\n az_rightward = az_occ - az_now + 360\n else:\n az_rightward = az_occ - az_now\n if az_rightward == 0:\n self.left_right_label['text'] = '(ok now)'\n self.left_right_distance_label['text'] = NO_DATA\n self.left_right_degrees['text'] = NO_DATA\n elif az_rightward < 0:\n self.left_right_label['text'] = 'LEFT'\n self.left_right_distance_label['text'] = '{:10.2f}'.format(abs(az_rightward)).strip()\n self.left_right_degrees['text'] = ' ' + DEGREE_TEXT\n else:\n self.left_right_label['text'] = 'RIGHT'\n self.left_right_distance_label['text'] = '{:10.2f}'.format(abs(az_rightward)).strip()\n self.left_right_degrees['text'] = ' ' + DEGREE_TEXT\n\n alt_upward = alt_occ - alt_now\n if alt_upward == 0:\n self.up_down_label['text'] = '(ok now)'\n self.up_down_distance_label['text'] = NO_DATA\n self.up_down_degrees['text'] = NO_DATA\n elif alt_upward < 0:\n self.up_down_label['text'] = 'LOWER'\n self.up_down_distance_label['text'] = '{:10.2f}'.format(abs(alt_upward)).strip()\n self.up_down_degrees['text'] = ' ' + DEGREE_TEXT\n else:\n self.up_down_label['text'] = 'RAISE'\n self.up_down_distance_label['text'] = '{:10.2f}'.format(abs(alt_upward)).strip()\n self.up_down_degrees['text'] = ' ' + DEGREE_TEXT", "def CalculateCompassDifference(a, b):\n delta = NormalizeAngle(a - b)\n return delta", "def calc_diesel_equiv_captured (self):\n if self.generation_wind_proposed == 0:\n excess_percent = 0\n else:\n excess_percent = self.excess_energy / self.generation_wind_proposed\n excess_captured_percent = excess_percent * \\\n (self.cd['percent excess energy capturable'] / 100.0)\n if self.comp_specs['secondary load']:\n net_excess_energy = excess_captured_percent * \\\n self.generation_wind_proposed\n else:\n net_excess_energy = 0\n\n #~ conversion = 0.99/0.138/0.8/293\n conversion = self.cd['efficiency electric boiler']/ \\\n (1/constants.mmbtu_to_gal_HF)/ \\\n self.cd['efficiency heating oil 
boiler']/\\\n (constants.mmbtu_to_kWh)\n self.diesel_equiv_captured = net_excess_energy * conversion\n\n #~ print 'self.diesel_equiv_captured ',self.diesel_equiv_captured", "def percentage_change(old_value, new_value):\n\n result = float(100 * (new_value - old_value) / old_value)\n\n return result", "def fitness(im1,im2):\n\n arr1 = np.array(im1,np.int16) # Creates array of image to easily calculate the difference between pixels.\n arr2 = np.array(im2,np.int16) #np.int16 is used to change the dtype\n\n\n dif = np.sum(np.abs(arr1-arr2))\n\n return (dif/255.0 * 100)/arr1.size", "def evaluer(self):\n\n WhiteScore = 0\n BlackScore = 0\n\n # Parsing the board squares from 0 to 63\n for pos1, piece in enumerate(self.cases):\n\n # Material score\n if (piece.couleur == 'blanc'):\n WhiteScore += piece.valeur\n else:\n # NB : here is for black piece or empty square\n BlackScore += piece.valeur\n\n if (self.side2move == 'blanc'):\n return WhiteScore - BlackScore\n else:\n return BlackScore - WhiteScore", "def calculate_cable(self):\n x_houses, y_houses, x_batt, y_batt = self.get_coordinates()\n\n all_diff = []\n for x_house, y_house in list(zip(x_houses, y_houses)):\n house_diff = {}\n counter = 0\n for x, y in list(zip(x_batt, y_batt)):\n x_diff = abs(x - x_house)\n y_diff = abs(y - y_house)\n house_diff[counter] = (x_diff + y_diff)\n counter += 1\n all_diff.append(house_diff)\n\n # set as attributes\n keys_list = list(self.houses.keys())\n for i, key in enumerate(keys_list):\n self.houses[key].dist = all_diff[i]", "def reusability(self):\n self._reusability = -0.25 * self.DCC + 0.25 * self.CAMC + 0.5 * self.CIS + 0.5 * self.DSC\n return round(self._reusability, 5)", "def perfect_acc(abst_setpoint, gameboard):\n correct_fraction = (gameboard.ncell - gameboard.pr_mislabel * gameboard.nnoisy) / gameboard.ncell\n predicted_fraction = 1.0 - abst_setpoint\n return np.minimum(1.0, correct_fraction/predicted_fraction)", "def test_reference_conversion_factors():\n assert constants.eV == pytest.approx(1.602176565e-19)\n assert constants.eV * constants.N_A / constants.kcal == pytest.approx(23.06, 3e-5)\n assert constants.hartree * constants.N_A / constants.kcal == pytest.approx(627.5095)\n assert constants.hartree / constants.eV == pytest.approx(27.2114)\n assert constants.hartree * constants.centi / (\n constants.h * constants.c\n ) == pytest.approx(219474.63)", "def delta_func(self, st):\n res0 = st._state['visible']['reserve'][0]\n res1 = st._state['visible']['reserve'][1]\n number = st._state['visible']['number']\n if st._state['visible']['turn'] is 0:\n delta = res0-res1\n else:\n delta = res1-res0\n return number, delta", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def get_state_score(self):\n # type: () -> float\n if self.prev_state is None:\n return 0 # initial state score\n from ResearchNLP.z_experiments.experiment_util import run_classifier\n from ResearchNLP import Constants as cn\n\n # if hasattr(cn, \"last_E_out\"):\n # before = cn.last_E_out\n # else:\n before = run_classifier(self.curr_train_df, cn.validation_data_df).acc\n\n combined_df = pd.concat([self.curr_train_df, self.labeled_state_df], ignore_index=True)\n after = run_classifier(combined_df, cn.validation_data_df).acc\n\n diff = after - before # difference in acc score. 
NOT NORMALIZED, but its supposed to be OK\n return diff", "def calculate(self):\n\n gt = self.ground_truth.flatten().astype(np.int8)\n seg = self.segmentation.flatten().astype(np.int8)\n\n probability_difference = np.absolute(gt - seg).sum()\n probability_joint = (gt * seg).sum()\n\n if probability_joint != 0:\n return probability_difference / (2. * probability_joint)\n else:\n return -1", "def understandability(self):\n # self._understandability = - 0.33 * self.ANA + 0.33 * self.DAM - 0.33 * self.DCC + 0.34 * self.CAMC \\\n # - 0.33 * self.NOP - 0.33 * self.NOM - 0.33 * self.DSC\n self._understandability = - 0.33 * self.ANA + 0.66 * self.DAM - 0.33 * self.DCC + 0.66 * self.CAMC \\\n - 0.33 * self.NOP - 0.33 * self.NOM\n return round(self._understandability, 5)", "def envisaged_loss(self):\n loss = round(\n self.calcul_buy_nb_action() * self.stop_loss - self.investment_price(),\n 2,\n )\n percent_loss = round(loss * 100 / self.capital, 2)\n return loss, percent_loss", "def calc_change (change_amnts, rate_of_transition, from_cohort, present):\n row, col = cuda.grid(2)\n\n if row < from_cohort.shape[0] and col < from_cohort.shape[1]:\n change_amnts[row,col] = \\\n rate_of_transition[row,col] * from_cohort[row,col] \n if present[row, col] and change_amnts[row, col] > from_cohort[row, col]:\n change_amnts[row, col] = from_cohort[row,col]", "def rough_outcome(self) -> float:\n # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE\n # pick move based on this may not be optimal but better than random\n # return 1 if win immediately\n # return -1 if all states reachable will result the other player win\n # return 0 if otherwise ??? what the fuck does this mean\n # look two states forward\n pass", "def recall(self) -> float:\n if self.ref_ignored:\n num_ref_ignored = len(self.ref_set) - len(self.ref_unignored_set)\n self.num_ignored += num_ref_ignored\n # True Positive = the number of unignored reference mappings that are Positive\n tp = len(self.ref_unignored_set.intersection(self.pre_set))\n # False Negative = the number of unignored reference mappings that are Negative\n fn = len(self.ref_set) - tp - num_ref_ignored\n return tp / (tp + fn)", "def compare(cls, data_hist, ref_hist, abs_band):\n\n if not abs_band: # 3*sigma tolerance band from reference\n abs_band = 3*ref_hist.GetRMS()\n abs_band = abs(abs_band) # force +ve definite band\n frac_above = frac_above_threshold(data_hist, abs_band)\n frac_below = frac_below_threshold(data_hist, -abs_band)\n frac_outside = frac_above + frac_below\n debug('Fraction outside: {}, above: {}, below: {}'.\n format(frac_outside, frac_above, frac_below))\n if frac_outside > 0.01:\n return cls.create_final_dict(Score(0), ERROR_LEVELS.ERROR)\n else:\n return cls.create_final_dict(Score(100), ERROR_LEVELS.OK)", "def value(self):\n black, white = 0, 0\n for sq in Othello.squares():\n piece = self.__board[sq]\n if piece == BLACK: black += 1\n elif piece == WHITE: white += 1\n if black == white:\n return 0.5\n elif black > white:\n return 1\n else:\n return 0", "def _create_diff_state(self, cur_state):\n diffState = {}\n for key in list(self.EndState.keys()):\n curCnt = 0\n try:\n curCnt = cur_state.Deck[key]\n except KeyError:\n pass\n\n diff = self.EndState[key] - curCnt\n if diff > 0:\n diffState[key] = CardCosts[key]\n\n return diffState", "def mask_percentage(self):\n return 100 - self.tissue_percentage", "def stability(self, board):\n # Stable stones\n computer_board = self.get_stable_stones(board, self.computer_num)\n 
computer_stable = sum(sum(computer_board == 100))\n opponent_board = self.get_stable_stones(board, self.opponent_num)\n opponent_stable = sum(sum(opponent_board == 100))\n\n # Unstable stones are the ones which can be flanked in the next move\n computer_board = self.get_unstable_stones(board, self.opponent_color, self.computer_num,\n self.opponent_num, computer_board)\n computer_unstable = sum(sum(computer_board == 200))\n opponent_board = self.get_unstable_stones(board, self.computer_color, self.opponent_num,\n self.computer_num, opponent_board)\n opponent_unstable = sum(sum(opponent_board == 200))\n # the reset is semi stable with weight 0, so it is not important\n computer_stability = computer_stable - computer_unstable\n opponent_stability = opponent_stable - opponent_unstable\n\n if computer_stable + opponent_stable != 0:\n return 100 * (computer_stable - opponent_stable) / (computer_stable + opponent_stable)\n else:\n return 0", "def _hill_diff_diff(self, position):\n if position < 0:\n return 2\n else:\n return position * ((75 * (position ** 2)/((1 + 5 * position**2)**2.5)) - 5/((1 + 5 * position ** 2)**2.5)) \\\n - 10 * position/((1 + 5 * position ** 2)**1.5)", "def approximately_equal(new_data, old_data):\n absolute_difference = 0\n for i in [0, 1] if len(new_data) == 3 else [0, 1, 2]:\n new = np.array(new_data[i])\n old = np.array(old_data[i])\n absolute_difference += np.sum(np.abs(new-old))/np.sum(np.abs(new)+np.abs(old))\n print(absolute_difference)\n return absolute_difference", "def calculateCurrentPercentageChange(self, Prices):\n threeDayMovingAverage = self.calculateLatestThreeDayMA(Prices)\n fifteenDayMovingAverage = self.calculateLatestFifteenDayMA(Prices)\n percentageChange = self.calculatePercentChange(\n fifteenDayMovingAverage, threeDayMovingAverage)\n return percentageChange", "def calculate_advantage(stage_0, stage_1):\n # Improvement in hp difference is good.\n hp_pct_0 = (float(stage_0.friendly_life)/MAX_FRIENDLY_LIFE) - (float(stage_0.enemy_life)/MAX_ENEMY_LIFE)\n hp_pct_1 = (float(stage_1.friendly_life)/MAX_FRIENDLY_LIFE) - (float(stage_1.enemy_life)/MAX_ENEMY_LIFE)\n return hp_pct_1 - hp_pct_0", "def calc_diffs(i, j, correct_x, correct_y):\n return abs(i - correct_x) + abs(j - correct_y)", "def create_percent_diff(area_high: np.array, area_low: np.array) -> np.array:\n diff = np.round(\n (area_high.astype(float) - area_low.astype(float)) / 256 * 100\n ) + 100\n assert (diff < 0).sum() + (diff > 200).sum() == 0\n return diff.astype(\"uint8\")", "def compare(game,guess):\n\n return [abs(x-y) for x,y in zip(game,guess)]", "def __straightness_correction(self):\n self.elapsed_ticks_left, self.elapsed_ticks_right = \\\n read_enc_ticks(self.initial_ticks_left, self.initial_ticks_right)\n\n print(\"L: \" + str(self.elapsed_ticks_left) + \"\\tR: \" + str(self.elapsed_ticks_right))\n\n # Handle invalid encoder readings\n if self.elapsed_ticks_left < 0 and self.elapsed_ticks_right < 0:\n print(\"Bad encoder reading\")\n return (0, 0)\n if self.elapsed_ticks_left > self.elapsed_ticks_right:\n print(\"Right slow\")\n return (-get_inc(self.speed), get_inc(self.speed))\n elif self.elapsed_ticks_left < self.elapsed_ticks_right:\n print(\"Left slow\")\n return (get_inc(self.speed), -get_inc(self.speed))\n else:\n print(\"Equal\")\n return (0, 0)", "def check_allele_freq_diff(self):\r\n \r\n if not self.old or not self.new:\r\n self.freq_diff = \"NA\"\r\n self.pvalue = \"NA\"\r\n \r\n else:\r\n old_frq = self.old.alt_percent\r\n new_frq = self.new.alt_percent\r\n \r\n if 
old_frq == 0 or new_frq == 0:\r\n self.freq_diff = \"NA\"\r\n \r\n else:\r\n self.freq_diff = abs(old_frq-new_frq)", "def __complement_quality(self) -> float:\n group = np.zeros(shape=self.Dataset.size)\n np.put(group, self.get_cover(), 1)\n\n time = self.Dataset.survival\n status = self.Dataset.status\n\n _, pvalue = sm.duration.survdiff(time, status, group)\n return 1 - pvalue", "def assurance(a, b):\n return a - b", "def _calc_atmos_refco(self, bar_press_mbar, temp_degc, rh_pct, wl_mm):\n rh_frac = rh_pct / 100.0\n refa, refb = erfa.refco(bar_press_mbar, temp_degc, rh_frac, wl_mm)\n return (refa, refb)", "def compute_utility(board, color):\n player1_score = 0\n player2_score = 0\n\n score = get_score(board)\n if color == 1:\n return score[0] - score[1]\n else:\n return score[1] - score[0]", "def evaluate(self, board):\r\n\r\n self_moves = self.find_possible_moves(board, self.my_color)\r\n opponent_moves = self.find_possible_moves(board, self.opponent_color)\r\n\r\n mobility = 0 # Mobility captures Self's profit in amount of available moves\r\n disk_parity = 0 # Disk parity captures Self's profit in raw disk amount\r\n corners = 0 # Corners captures Self's profit in occupied corners\r\n corner_proximity = 0 # Corner proximity captures the risk of giving away a free corner\r\n stability = 0 # Stability captures Self's profit in unflippable disks\r\n\r\n # Calculating mobility heuristic\r\n self_immediate_mobility = len(self_moves)\r\n opponent_immediate_mobility = len(opponent_moves)\r\n\r\n if self_immediate_mobility + opponent_immediate_mobility != 0:\r\n mobility = 100 * (self_immediate_mobility - opponent_immediate_mobility) / (self_immediate_mobility + opponent_immediate_mobility)\r\n\r\n # Calculate disk parity heuristic\r\n self_disks = self.get_disk_count(self.my_color, board)\r\n opponent_disks = self.get_disk_count(self.opponent_color, board)\r\n\r\n disk_parity = 100 * (self_disks - opponent_disks) / (self_disks + opponent_disks)\r\n\r\n # Calculating corner heuristic\r\n corners_list = [(0,0), (0,7), (7,0), (7,7)]\r\n self_corners = 0\r\n opponent_corners = 0\r\n\r\n for corner in corners_list:\r\n if board[corner[0]][corner[1]] == self.my_color:\r\n self_corners += 1\r\n if board[corner[0]][corner[1]] == self.opponent_color:\r\n opponent_corners += 1\r\n\r\n if self_corners + opponent_corners != 0:\r\n corners = 100 * (self_corners - opponent_corners) / (self_corners + opponent_corners)\r\n\r\n # Calculating corner proximity heuristic\r\n corners_proximity_list = [(0, 1), (1, 0), (1, 1), (0, 6), (1, 6), (1, 7), (6, 0), (6, 1), (7, 1), (6, 6), (7, 6), (6, 7)]\r\n self_corner_proximity = 0\r\n opponent_corner_proximity = 0\r\n\r\n for cell in corners_proximity_list:\r\n if board[cell[0]][cell[1]] == self.my_color:\r\n self_corner_proximity += 1\r\n if board[cell[0]][cell[1]] == self.opponent_color:\r\n opponent_corner_proximity += 1\r\n\r\n if self_corner_proximity + opponent_corner_proximity != 0:\r\n corner_proximity = 100 * (self_corner_proximity - opponent_corner_proximity) / (self_corner_proximity + opponent_corner_proximity)\r\n\r\n # Calculating stability heuristic\r\n self_stability = self.get_stable_disks(board, self.my_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (0, 7)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 7))\r\n\r\n opponent_stability = self.get_stable_disks(board, self.opponent_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (0, 7)) + 
\\\r\n self.get_stable_disks(board, self.opponent_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (7, 7))\r\n\r\n if self_stability + opponent_stability != 0:\r\n stability = 100 * (self_stability - opponent_stability) / (self_stability + opponent_stability)\r\n\r\n # Calculating the final value\r\n disk_total = self.get_disk_count(self.my_color, board) + self.get_disk_count(self.opponent_color, board)\r\n\r\n # In early-game, focus on maximal mobility and stability. Avoid amassing too many disks.\r\n if disk_total < 15:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 30 * mobility + \\\r\n 30 * stability\r\n\r\n # In mid-game, focus on capturing corners and further building stability\r\n elif disk_total < 45:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 20 * mobility + \\\r\n 35 * stability\r\n\r\n # In late-game, focus on getting as many discs as possible\r\n else:\r\n heuristic_value = 30 * corners + \\\r\n 15 * mobility + \\\r\n 30 * stability + \\\r\n 35 * disk_parity\r\n\r\n return heuristic_value", "def compute_mc_acceptance(self):\n if self.steps > 0:\n self.move_viability = \\\n (1. * self.viablesteps) / self.steps\n if self.viablesteps > 0:\n self.acceptance = float(self.acceptedsteps)/float(self.viablesteps)\n else:\n self.acceptance = 0.0\n else:\n self.move_viability = 0.0\n self.acceptance = 0.0", "def expected_result(self, other):\r\n return float(1) / (1 + math.pow(10, float(other.elo - self.elo) / DIVIDER))", "def percent_changes(self):\n\n # close_t = float(val[\"klines\"][\"1m\"].get(self.mw.cfg_manager.pair, {})[-5][4])\n klines_data = self.mw.klines.get(\"1m\")\n coin_data = klines_data.get(self.mw.cfg_manager.pair)\n\n if isinstance(coin_data, list):\n close_5m = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-5][4])\n close_15m = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-15][4])\n # close_30m = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-30][4])\n close_1h = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-60][4])\n close_4h = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-240][4])\n\n change_5m_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_5m)) - 1) * 100\n change_15m_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_15m)) - 1) * 100\n # change_30m_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_30m)) - 1) * 100\n change_1h_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_1h)) - 1) * 100\n change_4h_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_4h)) - 1) * 100\n\n change_1d_value = float(val[\"tickers\"][self.mw.cfg_manager.pair][\"priceChangePercent\"])\n\n\n changes = [self.mw.change_5m, self.mw.change_15m, self.mw.change_1h, self.mw.change_4h, self.mw.change_1d]\n change_values = [change_5m_value, change_15m_value, change_1h_value, change_4h_value, change_1d_value]\n\n for i, change in enumerate(changes):\n if change_values[i] > 0:\n operator = \"+\"\n color = Colors.color_green\n elif change_values[i] < 0:\n operator = \"\"\n color = Colors.color_pink\n else:\n operator = \"\"\n color = Colors.color_grey\n\n # print(str(change))\n change.setText(\"<span style='color: \" + color + \"'>\" + operator + \"{0:.2f}\".format(change_values[i]) + \"%</span\")", "def compare(cls, data_hist, ref_hist, tolerance):\n dmean = 
abs(data_hist.GetMean() - ref_hist.GetMean())\n dwidth = abs(data_hist.GetRMS() - ref_hist.GetRMS())\n score = 70.0 * (dmean < abs(0.3*ref_hist.GetRMS()))\n score += 30.0 * (dwidth < abs(tolerance*ref_hist.GetRMS()))\n if score > 70.0: # both passes: 100\n level = ERROR_LEVELS.OK\n elif score >= 30.0: # only one passes: 70 or 30\n level = ERROR_LEVELS.WARNING\n else: # both fails: 0\n level = ERROR_LEVELS.ERROR\n debug('score: {}, level: {}'.format(score, level))\n return cls.create_final_dict(Score(score), level)", "def delta_e_76(lab1, lab2):\n\n l1, a1, b1 = lab1\n l2, a2, b2 = lab2\n return (l1 - l2) ** 2 + (a1 - a2) ** 2 + (b1 - b2) ** 2", "def obtain_percent_changes(self):\n\n results = []\n \n for bond in self.ts.bonds:\n i, j = bond.atom_indices\n before = self.pre_geometry.get_distance(i, j)\n after = self.post_geometry.get_distance(i, j)\n results.append([bond.index, bond.atom_indices,\n bond.reaction_center, percent_change(before, after)])\n\n self.percent_changes = pd.DataFrame(results, columns=[\"index\", \"atom_indices\", \"center\", \"percent_change\"])\n\n return self.percent_changes", "def test_value_change(self):\n before = self.data.diffusion_data[:, :, 0, 0]\n after = module_05.run_module(self.data).diffusion_data[:, :, 0, 0]\n self.assertFalse(np.all(before == after))", "def ComputeChangeInStateOfCharge(self):\r\n pass", "def completeness_of_game(game):\n spaces = game.width * game.height\n played_spaces = len([x for x in game._board_state[:-3] if x == 1])\n return float(played_spaces / spaces)", "def calc_fall_diff(self):\n ch1_fall = self.read_channel(2)\n ch2_fall = self.read_channel(3)\n if (len(ch1_fall)==0) or (len(ch2_fall)==0):\n print(\"TDC readout error\")\n return \n\n try:\n ch1_i=0\n ch2_i=0\n fall_diff_list=[]\n while True:\n fall_diff = ch1_fall[ch1_i] - ch2_fall[ch2_i]\n if abs(fall_diff)<50000000000: # 50ms\n fall_diff_list.append(fall_diff)\n ch1_i = ch1_i + 1\n ch2_i = ch2_i + 1\n elif fall_diff>0:\n if fall_diff>500000000000: # 0.5s\n ch1_i = ch1_i + 1\n else:\n ch2_i = ch2_i + 1\n else:\n if fall_diff<-500000000000:\n ch2_i = ch2_i + 1\n else:\n ch1_i = ch1_i + 1\n except Exception as e:\n pass\n return fall_diff_list", "def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga 
- fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # 
print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result", "def migration_check(self):\r\n\r\n from land import non_gtgp_part_list, gtgp_part_list, non_gtgp_area_list\r\n\r\n self.non_gtgp_area = non_gtgp_area_list[self.hh_id]\r\n\r\n if num_labor_list[self.hh_id] != 0:\r\n non_gtgp_land_per_labor = self.non_gtgp_area / num_labor_list[self.hh_id]\r\n else:\r\n non_gtgp_land_per_labor = 0\r\n try:\r\n remittance = random.normalvariate(1200, 16000)\r\n # 1200 is the mean, and 400^2 is the st. dev. according to the original pseudocode, but this seems strange\r\n except:\r\n remittance = 0\r\n if remittance < 0:\r\n remittance = 0\r\n self.mig_remittances = float(remittance)\r\n if self.hh_id in non_gtgp_part_list:\r\n self.gtgp_part = 0\r\n elif self.hh_id in gtgp_part_list:\r\n self.gtgp_part = 1\r\n prob = math.exp(2.07 - 0.00015 * float(self.income_local_off_farm) + 0.67 * float(num_labor_list[self.hh_id])\r\n + 4.36 * float(self.migration_network) - 0.58 * float(non_gtgp_land_per_labor)\r\n + 0.27 * float(self.gtgp_part) - 0.13 * float(self.age) + 0.07 * float(self.gender)\r\n + 0.17 * float(self.education) + 0.88 * float(self.marriage) +\r\n 1.39 * float(self.work_status) + 0.001 * float(self.mig_remittances)) # Shuang's formula\r\n mig_prob = prob / (prob + 1)\r\n if random.random() < mig_prob and hh_size_list[self.hh_id] >= 2: # out-migration occurs\r\n if hh_migration_flag[self.hh_id] == 0: # only one migrant allowed per household at a time\r\n hh_size_list[self.hh_id] -= 1\r\n self.past_hh_id = self.hh_id\r\n self.migration_status = 1\r\n from land import household_income_list\r\n household_income_list[self.past_hh_id] += self.mig_remittances\r\n if self.unique_id in head_of_household_list:\r\n head_of_household_list[self.past_hh_id] = 0\r\n former_hoh_list[self.hh_id] = self.unique_id\r\n self.resource_frequency = self.resource_frequency * 0.5\r\n hh_migration_flag[self.hh_id] = 1\r\n if self.work_status == 1:\r\n num_labor_list[self.hh_id] -= 1\r\n if self.unique_id in labor_list:\r\n labor_list.remove(self.unique_id)\r\n if self.hh_id not in total_migration_list:\r\n total_migration_list[self.hh_id] = 1\r\n self.work_status = 0\r\n\r\n self.hh_id = 'Migrated'", "def check_performance(self):\n self.lg.debug('Checking performance.')\n avg_up = (sum(self.results_up)) / len(self.results_up)\n avg_down = (sum(self.results_down)) / len(self.results_down)\n if (\n avg_up < self.tolerance * self.up or\n 
avg_down < self.tolerance * self.down\n ):\n self.bad_performance = True\n else:\n self.bad_performance = False", "def evaluate_binary_consistency(self):\n\n change_rw = 0\n change_sm = 0\n th = [0.005]\n for threshold in th:\n raw_th = [self.rw_data[t] > threshold for t in range(0, self.T)]\n smooth_th = [self.smth_data[t] > 0 for t in range(0, self.T)]\n # print(\"Zeros rw:\", get_avg_zeros_per_row(raw_th))\n # print(\"Zeros sm:\", get_avg_zeros_per_row(self.smth_data))\n change_rw = change_rw + self.change_of_network_over_time(raw_th)\n change_sm = change_sm + self.change_of_network_over_time(smooth_th)\n\n change_rw = change_rw / len(th)\n change_sm = change_sm / len(th)\n\n return change_rw, change_sm", "def diff(self, content):\n\n self.differ.set_seq2(self.make_hash_sequence(content))\n percent_diff = (1.0 - self.differ.ratio()) * 100.0\n percent_diff = 1 if 0 < percent_diff < 1 else int(round(percent_diff, 0))\n\n if percent_diff != 0 and len(content) < self.expected_length:\n percent_diff *= -1\n\n return percent_diff", "def stats_change(self):\n return True if self.board.prev_state != self.board.shot_count else False", "def _compute_reward(self):\n last_score = self.episode_qualities[-2]\n new_score = self.episode_qualities[-1]\n reward = new_score - last_score\n return reward", "def statusCompare (x, y):\n xs = db.status.get(x, 'order')\n ys = db.status.get(y, 'order')\n c = float(xs) - float(ys)\n if c >= 0.0: \n return int(c)\n else:\n return -int(abs(c))", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore", "def estimate(self, states):\n scores = [state.get_score() for state in states]\n return np.array([score[0] - score[1] for score in scores])", "def set_defensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0:\n opp_fga = opp_team[\"t2p_int\"] + opp_team[\"t3p_int\"]\n opp_fgm = opp_team[\"t2p_conv\"] + opp_team[\"t3p_conv\"]\n try:\n dor = Decimal(opp_team[\"reb_of\"] / (opp_team[\"reb_of\"] + team[\"reb_def\"]))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n dor = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n dor = 0\n\n try:\n dfg = Decimal(opp_fgm / opp_fga)\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n dfg = 0\n try:\n fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor))\n except:\n fmwt = 0\n stops1 = bx[\"steals\"] + bx[\"block_shots\"] * fmwt * (1 - Decimal('1.07') * dor) + bx[\"reb_def\"] * (1 - fmwt)\n\n try:\n stops2 = (Decimal((opp_fga - opp_fgm - team[\"block_shots\"]) / team[\"minutes\"]) * fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team[\"turnovers\"] - team[\"steals\"]) / team[\"minutes\"])) * bx[\"minutes\"] + Decimal(bx[\"fouls_cm\"] / team[\"fouls_cm\"]) * Decimal('0.4') * opp_team[\"tl_int\"] * (1 - Decimal(opp_team[\"tl_conv\"] / opp_team[\"tl_int\"]))**2\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n stops2 = 0\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n stops2 = 0\n\n stops = stops1 + stops2\n poss = self.get_team_possessions()\n if bx[\"minutes\"] > 0:\n 
stop_percentage = (float(stops) * float(opp_team[\"minutes\"])) / (float(poss) * float(bx[\"minutes\"]))\n else:\n stop_percentage = 0.00\n opp_points = opp_team[\"t2p_conv\"] * 2 + opp_team[\"t3p_conv\"] * 3 + opp_team[\"tl_conv\"]\n team_defensive_rating = 100 * (float(opp_points) / poss)\n try:\n d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team[\"tl_conv\"]) / float(opp_team[\"tl_int\"])))**2) * float(opp_team[\"tl_int\"])*0.4)\n result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n d_pts_per_scposs = 0\n result = 0.00\n\n\n\n # print(\"dor: \" + str(dor))\n # print(\"dfg: \" + str(dfg))\n # print(\"fmwt: \" + str(fmwt))\n # print(\"stops1: \" + str(stops1))\n # print(\"stops2: \" + str(stops2))\n # print(\"stops: \" + str(stops))\n # print(\"poss: \" + str(poss))\n # print(\"stop_percentage: \" + str(stop_percentage))\n # print(\"opp_points: \" + str(opp_points))\n # print(\"team_defensive_rating: \" + str(team_defensive_rating))\n # print(\"d_pts_per_scposs: \" + str(d_pts_per_scposs))\n # print(\"drtg: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n self.drtg = \"%.2f\" % round(result, 2)", "def efficiency (self, base_hist):\n keep = self\n orig = base_hist\n rej = orig - keep\n\n eff = keep / orig\n nkeep = keep.values\n nrej = rej.values\n eff.errors = np.sqrt (\n (nrej / (nkeep+nrej)**2 * keep.errors)**2\n + (-nkeep / (nkeep+nrej)**2 * rej.errors)**2 )\n return eff", "def efficiency (self, base_hist):\n keep = self\n orig = base_hist\n rej = orig - keep\n\n eff = keep / orig\n nkeep = keep.values\n nrej = rej.values\n eff.errors = np.sqrt (\n (nrej / (nkeep+nrej)**2 * keep.errors)**2\n + (-nkeep / (nkeep+nrej)**2 * rej.errors)**2 )\n return eff", "def get_reward(self):\n\t\tdist = np.sqrt(np.sum(np.square(np.asarray(self.state) - np.asarray(self.goal))))\n\n\t\tdist_diff = self.prev_dist - dist\n\t\tself.reward = dist_diff * 10\n\n\t\tself.prev_dist = dist", "def get_reward(state, resolution, grid_x, grid_y):\n a,b = single_index_to_index(state, resolution)\n position = index_to_obs(a, b, grid_x, grid_y )[0]\n if position >= 0.5:\n return 0\n return -1", "def calculate_area_diff(gt_area, perf_area):\n return abs(gt_area - perf_area) / gt_area", "def calculate_utility(state, player):\n thisPlayer = player\n \n if state.winner() == (not thisPlayer):\n return -BigInitialValue\n if state.winner() == thisPlayer:\n return BigInitialValue\n return calculate_possible_fours(state, thisPlayer) - calculate_possible_fours(state, not thisPlayer)", "def diffImage(storedFrame,currentFrame,pixThreshold):\n\n diff = cv2.absdiff(storedFrame,currentFrame)\n _,diff = cv2.threshold(diff,pixThreshold[0],255,cv2.THRESH_BINARY)\n diff = diff / 255\n return diff", "def acceptance(h_old, h_new):\n\n return float(-h_new + h_old)", "def compareTwoReco(reference, new, histos, debug=1):\n\n # Tracks with index False are the ones that have been matched to the reference track collection\n new_valid = [True for i in new]\n\n # Tracks with index False are the ones that have been matched to the comparison track collection\n original_valid = [True for i in reference]\n print \" \".join(\"%10s\" % k for k in variables)\n debug_verbose = checkDebug(debug, 'Verbose')\n debug_ordinary = checkDebug(debug, 'Ordinary')\n debug_recovery = checkDebug(debug, 'Recovery')\n debug_lost = 
checkDebug(debug, 'Lost')\n debug_fake = checkDebug(debug, 'Fake')\n\n for original_index, original in enumerate(reference):\n # Fill in cumulative plots for the reference sample first\n histos['reference_hits_vs_algo'].Fill(original.algo, original.hits)\n histos['reference_hits_vs_orialgo'].Fill(original.orialgo, original.hits)\n histos['reference_hits_vs_pt'].Fill(original.pt, original.hits)\n histos['den'].Fill(original.pt)\n histos['den_eta'].Fill(original.eta)\n histos['den_phi'].Fill(original.phi)\n histos['den_hits'].Fill(original.hits)\n histos['den_algo'].Fill(original.algo)\n histos['den_orialgo'].Fill(original.orialgo)\n\n # Now start to look for a matching track in the comparison track collection\n window_depth = 400 # elements to span to look for best candidate\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = -1, 100, 100\n if debug_verbose:\n print original\n for i,j in enumerate(new):\n if new_valid[i] == True:\n if debug_verbose:\n print \" \", i, j\n if window_depth == 0:\n break\n dr_squared, dPt_over_pt = match(original, j)\n if dr_squared < bestDeltaRMatch*bestDeltaRMatch and dPt_over_pt < DELTA_PT_OVER_PT_CUT:\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = i, dr_squared, dPt_over_pt\n if debug_verbose:\n print \" \", window_depth, iBest, bestDeltaRMatch, dr_squared, bestDeltaPt_over_PtMatch, dPt_over_pt\n if bestDeltaRMatch <= 0.0001 or bestDeltaPt_over_PtMatch == 0.0001:\n break\n window_depth -= 1\n if iBest != -1 and bestDeltaRMatch < DELTA_R_CUT:\n # These are the tracks in the reference track collection\n # that have been matched to a track in the comparison\n # track collection\n new_valid[iBest] = False\n original_valid[original_index] = False\n assert original.run == new[iBest].run, \"run mismatch\"\n assert original.ls == new[iBest].ls, \"ls mismatch\"\n assert original.event == new[iBest].event, \"event mismatch\"\n if debug_ordinary:\n print original\n print new[iBest]\n print iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch, '\\n'\n histos['num'].Fill(original.pt)\n histos['num_eta'].Fill(original.eta)\n histos['num_phi'].Fill(original.phi)\n histos['num_hits'].Fill(original.hits)\n histos['num_algo'].Fill(original.algo)\n histos['num_orialgo'].Fill(original.orialgo)\n histos['fake_num'].Fill(new[iBest].pt)\n histos['fake_num_eta'].Fill(new[iBest].eta)\n histos['fake_num_phi'].Fill(new[iBest].phi)\n histos['fake_num_hits'].Fill(new[iBest].hits)\n histos['fake_num_algo'].Fill(new[iBest].algo)\n histos['fake_num_orialgo'].Fill(new[iBest].orialgo)\n histos['comparison_algo_vs_reference_algo'].Fill(original.algo, new[iBest].algo)\n histos['comparison_orialgo_vs_reference_orialgo'].Fill(original.orialgo, new[iBest].orialgo)\n histos['comparison_hits_vs_reference_hits'].Fill(original.hits, new[iBest].hits)\n\n # Let's try a recovery loop with somewhat lesser stringent cuts\n for original_index, original in enumerate(reference):\n if original_valid[original_index]:\n # Now start to look for a matching track in the comparison track collection\n window_depth = 300 # elements to span to look for best candidate\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = -1, 100, 100\n if debug_verbose:\n print \"Recovery \", original\n for i,j in enumerate(new):\n if new_valid[i] == True:\n if debug_verbose:\n print \"Recovery \", i, j\n if window_depth == 0:\n break\n dr_squared, dPt_over_pt = match(original, j)\n if dr_squared < bestDeltaRMatch*bestDeltaRMatch and dPt_over_pt < DELTA_PT_OVER_PT_CUT*6:\n iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch = i, 
dr_squared, dPt_over_pt\n if debug_verbose:\n print \"Recovery \", window_depth, iBest, bestDeltaRMatch, dr_squared, bestDeltaPt_over_PtMatch, dPt_over_pt\n if bestDeltaRMatch <= 0.0001 or bestDeltaPt_over_PtMatch == 0.0001:\n break\n window_depth -= 1\n if iBest != -1 and bestDeltaRMatch < DELTA_R_CUT*10: # inflate cut on DeltaR to recover some good-medium matching\n # These are the tracks in the reference track collection\n # that have been matched to a track in the comparison\n # track collection\n new_valid[iBest] = False\n original_valid[original_index] = False\n if debug_recovery:\n print \"Recovery \", original\n print \"Recovery \", new[iBest]\n print \"Recovery \", iBest, bestDeltaRMatch, bestDeltaPt_over_PtMatch\n histos['num'].Fill(original.pt)\n histos['num_eta'].Fill(original.eta)\n histos['num_phi'].Fill(original.phi)\n histos['num_hits'].Fill(original.hits)\n histos['num_algo'].Fill(original.algo)\n histos['num_orialgo'].Fill(original.orialgo)\n histos['fake_num'].Fill(new[iBest].pt)\n histos['fake_num_eta'].Fill(new[iBest].eta)\n histos['fake_num_hits'].Fill(new[iBest].hits)\n histos['fake_num_algo'].Fill(new[iBest].algo)\n histos['fake_num_orialgo'].Fill(new[iBest].orialgo)\n histos['comparison_algo_vs_reference_algo'].Fill(original.algo, new[iBest].algo)\n histos['comparison_orialgo_vs_reference_orialgo'].Fill(original.orialgo, new[iBest].orialgo)\n histos['comparison_hits_vs_reference_hits'].Fill(original.hits, new[iBest].hits)\n\n\n # These are the tracks in the reference track collection\n # that have *not* been associated to any track in the\n # comparison collection == > LOST TRACKS\n reference_not_assigned = [j for i,j in enumerate(reference) if original_valid[i]]\n reference_not_assigned.sort(key=lambda tr: tr.algo)\n if debug_lost:\n print \"**** Lost tracks **** %d\" % len(reference_not_assigned)\n for j in reference_not_assigned:\n histos['lost_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['lost_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['lost_hits_vs_pt'].Fill(j.pt, j.hits)\n histos['lost_eta'].Fill(j.eta)\n if debug:\n print j\n if debug_lost:\n print \"**** End of Lost tracks ****\"\n\n # Fake Tracks\n for i, j in enumerate(new):\n # Fill in the cumulative plots related to tracks in the comparison track collection\n histos['comparison_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['comparison_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['comparison_hits_vs_pt'].Fill(j.pt, j.hits)\n histos['fake_den'].Fill(j.pt)\n histos['fake_den_eta'].Fill(j.eta)\n histos['fake_den_phi'].Fill(j.phi)\n histos['fake_den_hits'].Fill(j.hits)\n histos['fake_den_algo'].Fill(j.algo)\n histos['fake_den_orialgo'].Fill(j.orialgo)\n\n # These are the tracks in the comparison track collection\n # that have *not* been associated to any track in the\n # reference collection ==> FAKE TRACKS\n new_not_assigned = [j for i,j in enumerate(new) if new_valid[i]]\n new_not_assigned.sort(key=lambda tr: tr.algo)\n if debug_fake:\n print \"**** Fake tracks **** %d\" % len(new_not_assigned)\n for j in new_not_assigned:\n histos['fake_hits_vs_algo'].Fill(j.algo, j.hits)\n histos['fake_hits_vs_orialgo'].Fill(j.orialgo, j.hits)\n histos['fake_hits_vs_pt'].Fill(j.pt, j.hits)\n if debug:\n print j\n if debug_fake:\n print \"**** End of Fake tracks ****\"", "def calculate_abs(self):\n ref_spectra_raw = np.array(self.raw_data['spectrum_0'].attrs['reference'])\n self.ref_spectra_arr = np.subtract(ref_spectra_raw,self.back_spectra_arr)\n abs=-np.log10(self.pre_proc_data.div(self.ref_spectra_arr))\n 
self.abs_data=abs\n return self.abs_data", "def calc_difference_from_reference(inputs, outputs, verbose=True):\n\n # Get a list of reference input/output files\n filename_ref_inputs = glob.glob(lal_cuda.full_path_datafile(\"inputs.dat*\"))\n filename_ref_outputs = [\n filename_ref_input_i.replace(\n \"inputs.dat\",\n \"outputs.dat\") for filename_ref_input_i in filename_ref_inputs]\n\n # Look to see if the given inputs are in the stored reference inputs\n filename_ref_output = None\n for filename_ref_input_i, filename_ref_output_i in zip(filename_ref_inputs, filename_ref_outputs):\n inputs_i = inputs.read(filename_ref_input_i)\n\n # Check to see if this set of inputs matches the set that has been passed\n if(inputs_i == inputs):\n inputs_ref = inputs_i\n filename_ref_output = filename_ref_output_i\n break\n\n # Perform check if a match has been found\n if(not filename_ref_output):\n lal_cuda.log.warning(\n \"Checking could not be performed: reference data set with given inputs (%s) not found.\" %\n (inputs))\n else:\n if(verbose):\n lal_cuda.log.open('Performing test...')\n\n # Read reference dataset's outputs\n outputs_ref = outputs.read(filename_ref_output)\n\n # Compute statistics of difference from test reference\n hpval_real_diff_avg = 0.\n hpval_imag_diff_avg = 0.\n hcval_real_diff_avg = 0.\n hcval_imag_diff_avg = 0.\n hpval_real_diff_max = 0.\n hpval_imag_diff_max = 0.\n hcval_real_diff_max = 0.\n hcval_imag_diff_max = 0.\n for (hp_i, hc_i, hp_ref_i, hc_ref_i) in zip(outputs.hp, outputs.hc, outputs_ref.hp, outputs_ref.hc):\n hpval_real_diff_i = calc_frac_diff(hp_i.real, hp_ref_i.real)\n hpval_imag_diff_i = calc_frac_diff(hp_i.imag, hp_ref_i.imag)\n hcval_real_diff_i = calc_frac_diff(hc_i.real, hc_ref_i.real)\n hcval_imag_diff_i = calc_frac_diff(hc_i.imag, hc_ref_i.imag)\n hpval_real_diff_avg += hpval_real_diff_i\n hpval_imag_diff_avg += hpval_imag_diff_i\n hcval_real_diff_avg += hcval_real_diff_i\n hcval_imag_diff_avg += hcval_imag_diff_i\n hpval_real_diff_max = max([hpval_real_diff_max, hpval_real_diff_i])\n hpval_imag_diff_max = max([hpval_imag_diff_max, hpval_imag_diff_i])\n hcval_real_diff_max = max([hcval_real_diff_max, hcval_real_diff_i])\n hcval_imag_diff_max = max([hcval_imag_diff_max, hcval_imag_diff_i])\n hpval_real_diff_avg /= float(len(outputs.hp))\n hpval_imag_diff_avg /= float(len(outputs.hp))\n hcval_real_diff_avg /= float(len(outputs.hc))\n hcval_imag_diff_avg /= float(len(outputs.hc))\n\n # Report results\n if(verbose):\n lal_cuda.log.comment(' Average/maximum real(hp) fractional difference: %.2e/%.2e' %\n (hpval_real_diff_avg, hpval_real_diff_max))\n lal_cuda.log.comment(' Average/maximum imag(hp) fractional difference: %.2e/%.2e' %\n (hpval_imag_diff_avg, hpval_imag_diff_max))\n lal_cuda.log.comment(' Average/maximum real(hc) fractional difference: %.2e/%.2e' %\n (hcval_real_diff_avg, hcval_real_diff_max))\n lal_cuda.log.comment(' Average/maximum imag(hc) fractional difference: %.2e/%.2e' %\n (hcval_imag_diff_avg, hcval_imag_diff_max))\n lal_cuda.log.close(\"Done.\")\n\n return {\n 'hpval_real_diff_avg': hpval_real_diff_avg,\n 'hpval_real_diff_max': hpval_real_diff_max,\n 'hpval_imag_diff_avg': hpval_imag_diff_avg,\n 'hpval_imag_diff_max': hpval_imag_diff_max,\n 'hcval_real_diff_avg': hcval_real_diff_avg,\n 'hcval_real_diff_max': hcval_real_diff_max,\n 'hcval_imag_diff_avg': hcval_imag_diff_avg,\n 'hcval_imag_diff_max': hcval_imag_diff_max}", "def test_paired_difference_analyses(self):\r\n actual = paired_difference_analyses(\r\n 
self.personal_ids_to_state_values1,\r\n ['firmicutes-abundance',\r\n 'bacteroidetes-abundance'],\r\n ['Pre', 'Post'],\r\n output_dir=self.test_out,\r\n ymin=0.0,\r\n ymax=1.0)\r\n self.assertTrue(exists(join(self.test_out,\r\n 'paired_difference_comparisons.txt')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'firmicutes-abundance.pdf')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'bacteroidetes-abundance.pdf')))\r\n # three output paths returned\r\n self.assertEqual(len(actual[0]), 5)\r\n # expected t values returned, they should be less than (firmicutes) or greater (bacteroidetes) than 2 \r\n self.assertLess(abs(actual[1]['firmicutes-abundance'][4]), 2)\r\n self.assertLess(2, abs(actual[1]['bacteroidetes-abundance'][4]))", "def cycle_consistency_loss(self, ra, rb, fa, fb):\n with tf.device(\"/gpu:0\"):\n backward_loss = tf.reduce_mean(tf.abs(self.Ga2b(fa) - rb))\n with tf.device(\"/gpu:1\"):\n forward_loss = tf.reduce_mean(tf.abs(self.Gb2a(fb) - ra))\n loss = self.lambda1 * forward_loss + self.lambda2 * backward_loss\n return loss", "def vizDifference(diff):\n return (((diff - diff.min()) / (diff.max() - diff.min())) * 255).astype(np.uint8)", "def getCurrentState (events_counters, states):\n gamma_raw = 0\n if events_counters[0] + events_counters[2] == 0:\n gamma_raw = -1000\n else:\n gamma_raw = float (events_counters[0]) / (float (events_counters[0]) +\n float (events_counters[2])) \n\n theta_raw = 0\n if events_counters[1] + events_counters[3] == 0:\n theta_raw = -1000\n else: \n theta_raw = float (events_counters[1]) / (float (events_counters[1]) +\n float (events_counters[3]))\n\n #print (\"gamma_raw = {}; theta_raw = {}\".format (gamma_raw, theta_raw))\n min_dist1 = 1\n target_ind1 = 0\n min_dist2 = 1\n target_ind2 = 0 \n for ind1 in range (len (states[0])):\n if math.fabs (states[0][ind1] - gamma_raw) <= min_dist1:\n min_dist1 = math.fabs (states[0][ind1] - gamma_raw)\n target_ind1 = ind1\n\n for ind2 in range (len (states[1])):\n if math.fabs (states[1][ind2] - theta_raw) <= min_dist2:\n min_dist2 = math.fabs (states[1][ind2] - theta_raw)\n target_ind2 = ind2\n #print (\"gamma = {}; theta = {}\".format (states[0][target_ind1], states[1][target_ind2]))\n return (target_ind1, target_ind2)", "def discrepancy(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result -= value * math.log(self.betP(focal), 2)\n return round(result, 6)", "def compute_utility(self, board, move, player):\n r_alive = 0\n b_alive = 0\n rk_alive = 0\n bk_alive = 0\n for line in range(8):\n for col in range(8):\n if board[line][col] == \"R\":\n r_alive += 1\n elif board[line][col] == \"B\":\n b_alive += 1\n elif board[line][col] == \"RK\":\n rk_alive += 1\n elif board[line][col] == \"BK\":\n bk_alive += 1\n # if r_Alive > b_Alive:\n # if b_Alive == 0:\n # return 1\n # else: return 0\n # elif r_Alive == 0:\n # return -1\n powkings = 1.2\n result = 0\n if player == 'B':\n result = rk_alive*powkings + r_alive - bk_alive*powkings - b_alive\n else:\n result = bk_alive*powkings + b_alive - rk_alive*powkings - r_alive\n return result", "def utility(self):\n\n # -1 0 +1 for win / loss\n return check_victory(self.board)", "def _calculate_reward( self, old_state_vars, new_state_vars ):\n if 'total-wealth' == self.objective:\n x_old = old_state_vars.ptf_asset_vals.sum()\n x_new = new_state_vars.ptf_asset_vals.sum()\n elif 'relative-profit' == self.objective:\n x_old = old_state_vars.ptf_asset_vals.sum() - old_state_vars.bmk_asset_vals.sum()\n x_new = 
new_state_vars.ptf_asset_vals.sum() - new_state_vars.bmk_asset_vals.sum()\n else:\n raise ValueError( 'Unsupported objective: {}'.format(objective) )\n\n # Discount the objective before applying the utility function\n gamma = self.get_gamma()\n reward = self.utility_fun( gamma * x_new ) - self.utility_fun( x_old )\n return reward", "def compare_values((covered, total), tolerance):\n missing = total - covered\n return cmp(tolerance, missing)", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc" ]
[ "0.62179923", "0.60718656", "0.6032007", "0.59902894", "0.5899323", "0.5853778", "0.58384997", "0.5829881", "0.56770056", "0.56709003", "0.5670638", "0.5651848", "0.5637561", "0.56240946", "0.5624037", "0.5612395", "0.5599351", "0.5595555", "0.5589821", "0.5587549", "0.5574666", "0.5565435", "0.5541517", "0.55155593", "0.5503293", "0.54584146", "0.5456842", "0.5456822", "0.54561543", "0.5454164", "0.5453146", "0.5441554", "0.5439348", "0.54371256", "0.5429054", "0.5427876", "0.5424996", "0.5412574", "0.5402556", "0.5400974", "0.5377762", "0.5373265", "0.5368605", "0.5359455", "0.5357935", "0.53563607", "0.5353579", "0.5352371", "0.5345785", "0.5334986", "0.533369", "0.53245264", "0.5318085", "0.5314686", "0.53108096", "0.53103995", "0.53085136", "0.53075594", "0.53071594", "0.5299389", "0.52955186", "0.5293208", "0.5285021", "0.5281851", "0.5274656", "0.5274252", "0.52677494", "0.5265176", "0.5263324", "0.52595717", "0.5247295", "0.52361345", "0.5234239", "0.523192", "0.52202225", "0.52193534", "0.52163476", "0.5201941", "0.51882845", "0.5185568", "0.5185568", "0.51836866", "0.51834846", "0.5179802", "0.5177206", "0.51712877", "0.5171231", "0.516966", "0.51656306", "0.5164956", "0.51641893", "0.51594996", "0.5156377", "0.515247", "0.51478505", "0.51434106", "0.51331115", "0.51293385", "0.5126033", "0.51249003" ]
0.6039716
2
Function to generate the output graphs displayed in the Tygron engine.
def biodiversity_graph(self, graph="percentage"):
    plt.ioff()
    fig, ax = plt.subplots()
    index = np.arange(7)
    bar_width = 0.3
    offset = bar_width / 2
    if graph == "score":
        if self.PotTax_reference is not None:
            # this requires a deepcopy, otherwise the xticks updates also
            # updates the PotTax_percentage indexes.
            xticks = deepcopy(self.PotTax_reference.index.values)
            for i, item in enumerate(xticks):
                if item == "DragonDamselflies":
                    xticks[i] = "Dragon &\nDamselflies"
                if item == "HigherPlants":
                    xticks[i] = "Higher\nPlants"
            #label = ("reference: " +
            #         str(round(self.PotTax_reference.sum().TFI, 2)))
            label = "initial board"
            reference = ax.bar(
                index-offset, self.PotTax_reference.values.flatten(),
                bar_width, label=label, tick_label=xticks)
        if self.PotTax_intervention is not None:
            #label = ("intervention: " +
            #         str(round(self.PotTax_intervention.sum().TFI, 2)))
            label = "current board"
            intervention = ax.bar(
                index+offset, self.PotTax_intervention.values.flatten(),
                bar_width, label=label, tick_label=xticks)
        ax.set_title("Biodiversity scores")
        ax.set_ylabel("total value")
        legend = ax.legend(loc='best', facecolor='black', edgecolor='w',
                           fancybox=True, framealpha=0.5, fontsize="large")
        plt.setp(legend.get_texts(), color='w')
    else:
        if self.PotTax_percentage is not None:
            # this requires a deepcopy, otherwise the xticks updates also
            # updates the PotTax_percentage indexes.
            xticks = deepcopy(self.PotTax_percentage.index.values)
            for i, item in enumerate(xticks):
                if item == "DragonDamselflies":
                    xticks[i] = "Dragon &\nDamselflies"
                if item == "HigherPlants":
                    xticks[i] = "Higher\nPlants"
            data = self.PotTax_percentage.values.flatten()
            percentage = ax.bar(
                index, data, bar_width, label="percentage",
                tick_label=xticks)
        ax.set_title("Biodiversity change")
        ax.set_ylabel("change (%)")
    # the xticks rotation could probably be handled better.
    for tick in ax.get_xticklabels():
        tick.set_rotation(90)
        tick.set_fontsize(14)
    # set the color of all figure borders, axis ticks and text to white.
    ax.spines['bottom'].set_color('w')
    ax.spines['top'].set_color('w')
    ax.spines['right'].set_color('w')
    ax.spines['left'].set_color('w')
    ax.tick_params(axis='x', colors='w')
    ax.tick_params(axis='y', colors='w')
    ax.yaxis.label.set_color('w')
    ax.yaxis.label.set_fontsize(14)
    ax.xaxis.label.set_color('w')
    ax.xaxis.label.set_fontsize(14)
    ax.title.set_fontsize(20)
    ax.title.set_color('w')
    plt.tight_layout()
    if graph == "score":
        plt.savefig(os.path.join(self.web_dir, "biodiversity_score1.png"),
                    edgecolor='w', transparent=True)
    else:
        plt.savefig(os.path.join(self.web_dir, "biodiversity_score2.png"),
                    edgecolor='w', transparent=True)
    plt.close(fig)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_graphs(self):\n\n try:\n from keras.utils import plot_model\n from keras.utils.vis_utils import model_to_dot\n\n # from IPython.display import SVG\n\n plot_model(self.model, to_file=\"model.png\")\n plot_model(\n self.latent_to_states_model, to_file=\"latent_to_states_model.png\"\n )\n plot_model(self.batch_model, to_file=\"batch_model.png\")\n if self.mol_to_latent_model is not None:\n plot_model(self.mol_to_latent_model, to_file=\"mol_to_latent_model.png\")\n\n print(\"Models exported to png files.\")\n\n except:\n print(\"Check pydot and graphviz installation.\")", "def graphs():\n return render_template(\"graphs.html\")", "def gen_graph(self):", "def plot_graph(self) -> None:", "def results():\n\n # # 1. tau_e graph\n # # ------------------------------------------------------------\n\n tau_es = np.load(datapath / \"tau_es.npy\", allow_pickle=True)\n\n # I want to plot tau_e against b for various Ns. Annoyingly this\n # means I have to do some index juggling.\n\n # This is all because of the way I set up datagen.DataSet... oh well.\n\n for i, N in enumerate(Ns):\n\n # values to plot against b for the specific N\n vals = []\n\n for j, b in enumerate(bs):\n\n k = Nb_to_ks[i][j]\n vals.append(tau_es[k])\n\n plt.plot(bs, vals, \"-\")\n\n plt.title(\"Auto-correlation e-folding timelag for \"\n \"variable temperatures, grid sizes\")\n\n plt.xlabel(\"$\\\\beta$\")\n plt.ylabel(\"$\\\\tau_e$\")\n\n plt.legend([f\"N={N}\" for N in Ns])\n\n plt.savefig(resultspath / \"tau_es.pdf\")\n # plt.show()\n plt.close()\n\n # 2. magnetisation graphs\n # ------------------------------------------------------------\n\n mags_list = [np.load(datapath / f\"mags-{k}.npy\") for k in range(kcount)]\n\n for i, N in enumerate(Ns):\n\n plt.title(f\"Square magnetisations N={N}\")\n plt.xlabel(\"t\")\n plt.ylabel(\"M\")\n\n for j, b in enumerate(bs):\n\n c = np.max([0, np.min([1, 10 * (b - 0.4)])])\n\n k = Nb_to_ks[i][j]\n vals = np.mean(mags_list[k]**2, axis=1)\n plt.plot(vals, color=(1 - c, 0, c))\n\n plt.savefig(resultspath / f\"mags-{N}.pdf\")\n # plt.show()\n plt.close()\n\n # 3. 
autoc graphs\n # ------------------------------------------------------------\n\n autocs_list = [\n np.load(datapath / f\"autocs-{k}.npy\") for k in range(kcount)]\n\n for i, N in enumerate(Ns):\n\n plt.figure(figsize=(8, 6))\n plt.axes(position=[.05, .05, .8, .9])\n\n plt.title(f\"Auto-correlation of $|M|$ with N={N}\")\n plt.xlabel(\"$ \\\\tau $\")\n plt.ylabel(\"$ A(\\\\tau) $\")\n\n for j, b in enumerate(bs):\n\n c = np.max([0, np.min([1, 10 * (b - 0.4)])])\n\n k = Nb_to_ks[i][j]\n autocs = np.load(datapath / f\"autocs-{k}.npy\")\n\n iternum = autocs.shape[0]\n sysnum = autocs.shape[1]\n vals = np.mean(autocs, axis=1)\n errs = np.std(autocs, axis=1, ddof=1) / np.sqrt(sysnum)\n\n plt.errorbar(range(iternum), vals, errs,\n color=(1 - c, 0, c), ecolor=(1 - c, 0, c, 0.4),\n elinewidth=1.5)\n\n # plt.plot(np.log(vals))\n\n plt.legend(bs, loc='center left', bbox_to_anchor=(1, 0.5))\n\n plt.savefig(resultspath / f\"autocs-{N}.pdf\")\n # plt.show()\n plt.close()", "def create_plots(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n self.sse_plot()\n self.avg_sse_plot()", "def create_visual_graph(self):\n if self.predict_new and self.prediction_without_covid_case:\n self.predict_co2_emission_future()\n self.save_prediction_df()\n else:\n self.restore_prediction_df()\n if not self.analysis_plot:\n self.predict_co2_emission_future()\n self.save_prediction_df()\n\n self.do_plot()\n self.output_graph_file = OUTPUT_GRAPH_PATH\n return self.output_graph_file", "def generate_statistics_plots(graph_name, graph_steps):\n df_final_situation = pd.DataFrame(columns=[\"type\", \"value\"])\n df_step = pd.DataFrame(columns=[\"type\", \"step\", \"value\"])\n df_exposed = pd.DataFrame(columns=[\"step\", \"type\", \"value\"])\n\n st.markdown(\"\")\n\n for i in range(graph_steps):\n # read graph and print stats\n graph_result_path = \"./data/output/\"\n G = nx.read_gexf(f\"{graph_result_path}G_{graph_name}_step{i}.gexf\")\n print_stats(G, i, graph_name)\n\n # LINE CHART (append informations into dataframe)\n df_step = df_step.append(\n {\"type\": \"not_exposed\", \"step\": i, \"value\": cn.count_not_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"exposed\", \"step\": i, \"value\": cn.count_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"infected\", \"step\": i, \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n line_chart = px.line(\n df_step,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Infection overall: {graph_name} step: {i}\",\n )\n\n # BAR CHART (append informations into dataframe)\n df_exposed = df_exposed.append(\n {\n \"step\": i,\n \"type\": \"opinion_leader\",\n \"value\": cn.count_exposed_opinion_leader(G),\n },\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"bot\", \"value\": cn.count_exposed_bot(G)},\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"user\", \"value\": cn.count_exposed_user(G)},\n ignore_index=True,\n )\n bar_chart = px.bar(\n df_exposed,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Type of agents exposed: {graph_name} step: {i}\",\n )\n\n # PIE CHART (append informations into dataframe)\n if i == 4:\n df_final_situation = df_final_situation.append(\n {\"type\": \"not_exposed\", \"value\": cn.count_not_exposed(G)},\n ignore_index=True,\n )\n df_final_situation = df_final_situation.append(\n {\"type\": \"exposed\", \"value\": cn.count_exposed(G)},\n ignore_index=True,\n 
)\n df_final_situation = df_final_situation.append(\n {\"type\": \"infected\", \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n #### CREATE THE PLOTS\n ##Uncomment plot(..) to save the plots to disk in html format\n\n plot_folder = \"./data/plots/\"\n\n # Plotly Line Plot\n # plot(line_chart, filename=f\"{plot_folder}steps_{graph_name}.html\")\n st.plotly_chart(line_chart, use_container_width=True)\n\n # Plotly bar plot\n # plot(bar_chart, filename=f\"{plot_folder}exposed_type_{graph_name}.html\")\n st.plotly_chart(bar_chart, use_container_width=True)\n\n # Plotly final pie chart\n final_pie_chart = px.pie(\n df_final_situation, values=\"value\", names=\"type\", title=f\"Final situation plot of: {graph_name}\"\n )\n # plot(final_pie_chart, filename=f\"{plot_folder}final_situation.html\")\n st.plotly_chart(final_pie_chart, use_container_width=True)\n\n print(\"\\nStatistics calculated succesfully\")\n\n return True", "def main():\n data_visualisation()\n write_hyper_params()\n write_result_tables()\n write_box_plots()", "def generate_plots():\n\n hmp = homemonitor_plot()\n hmp.load_data()\n hmp.plot_day()\n hmp.plot_hist()", "def apply(tsys, parameters=None):\n\n gviz = visualize_graphviz.visualize(tsys, parameters=parameters)\n return gviz", "def generate_plots(self, env):\n print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))\n for ncl in self._ncl:\n diagUtilsLib.generate_ncl_plots(env, ncl)", "def generate_plots(self, env):\n print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))\n for ncl in self._ncl:\n diagUtilsLib.generate_ncl_plots(env, ncl)", "def generate_plots(self, env):\n print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))\n for ncl in self._ncl:\n diagUtilsLib.generate_ncl_plots(env, ncl)", "def draw_num_classes_graphs():\n values = [10, 50, 100, 250, 1000, 4000]\n for num_classes in values:\n print(\"Training model on {} most common classes.\".format(num_classes))\n model = create_pretrained_model(num_classes=num_classes)\n histories = train(model, num_classes, epochs=50)\n run_name = get_run_name(\"{}classes\".format(num_classes))\n save_learning_curves(histories, run_name)\n csv_path = os.path.join(\"plots/\", run_name, \"data.csv\")\n ut.write_csv_dict(histories,\n keys=['loss', 'acc', 'val_loss', 'val_acc'],\n filename=csv_path)", "def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0", "def plot_tsnes():\n # Two environments (for main paper figure. 
All for final figure)\n ENVS = [\n \"BipedalWalker-v3\",\n #\"LunarLander-v2\",\n #\"Pendulum-v0\"\n \"Acrobot-v1\",\n #\"CartPole-v1\"\n ]\n ALGO_TYPES = [\n \"stablebaselines\",\n \"stablebaselines\",\n \"wann\",\n \"wann\",\n ]\n ALGO_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMAES\",\n ]\n ALGO_PRETTY_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMA-ES\"\n ]\n\n REWARD_SCALES = {\n \"Pendulum-v0\": [-1600, -200],\n \"Acrobot-v1\": [-500, -100],\n \"LunarLander-v2\": [-230, 200],\n \"BipedalWalker-v3\": [-100, 300],\n \"CartPole-v1\": [0, 500]\n }\n\n figure, axs = pyplot.subplots(\n figsize=[6.4 * 2, 4.8],\n nrows=2,\n ncols=4,\n gridspec_kw={'hspace': 0, 'wspace': 0},\n )\n\n for plot_i in range(2):\n env = ENVS[plot_i]\n reward_scale = REWARD_SCALES[env]\n for algo_i in range(len(ALGO_TYPES)):\n column_idx = (algo_i % 2) + plot_i * 2\n row_idx = 0 if algo_i <= 1 else 1\n ax = axs[row_idx, column_idx]\n algo_type = ALGO_TYPES[algo_i]\n algo_name = ALGO_NAMES[algo_i]\n algo_pretty_name = ALGO_PRETTY_NAMES[algo_i]\n\n experiment_glob = \"experiments/{}_{}_{}_*\".format(algo_type, env, algo_name)\n experiment_paths = glob(experiment_glob)\n tsnes = []\n rewards = []\n for experiment_path in experiment_paths:\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n population_tsnes = []\n population_rewards = []\n for path in pivector_paths:\n data = np.load(path)\n population_tsnes.append(data[\"tsne\"])\n population_rewards.append(data[\"average_episodic_reward\"])\n data.close()\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n tsnes = np.concatenate(tsnes, axis=0)\n rewards = np.concatenate(rewards, axis=0)\n\n # Min-max normalization\n rewards = (rewards - reward_scale[0]) / (reward_scale[1] - reward_scale[0])\n\n scatter = ax.scatter(\n tsnes[:, 0],\n tsnes[:, 1],\n c=rewards,\n cmap=\"plasma\",\n s=1,\n vmin=0,\n vmax=1\n )\n\n ax.text(0.98, 0.98, algo_pretty_name, horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes)\n ax.set_xticks([])\n ax.set_yticks([])\n # Hide spines, the outer edges\n ax.spines[\"top\"].set_alpha(0.2)\n ax.spines[\"bottom\"].set_alpha(0.2)\n ax.spines[\"left\"].set_alpha(0.2)\n ax.spines[\"right\"].set_alpha(0.2)\n # Hide edge spines and bolden mid-spines\n if row_idx == 0:\n ax.spines[\"top\"].set_visible(False)\n else:\n ax.spines[\"bottom\"].set_visible(False)\n if column_idx == 0:\n ax.spines[\"left\"].set_visible(False)\n elif column_idx == 1:\n ax.spines[\"right\"].set_alpha(1.0)\n elif column_idx == 2:\n ax.spines[\"left\"].set_alpha(1.0)\n elif column_idx == 3:\n ax.spines[\"right\"].set_visible(False)\n\n # Add titles\n if row_idx == 0 and (column_idx == 0 or column_idx == 2):\n ax.set_title(env.split(\"-\")[0], x=1.0)\n\n cbaxes = figure.add_axes([0.4, 0.94, 0.2, 0.02])\n cbar = figure.colorbar(scatter, orientation=\"horizontal\", cax=cbaxes)\n cbar.set_ticks([0.0, 0.5, 1.0])\n cbar.set_ticklabels([\"Min\", \"Reward\", \"Max\"])\n cbar.ax.xaxis.set_ticks_position('top')\n cbar.ax.xaxis.set_label_position('top')\n cbar.ax.tick_params(labelsize=\"small\", length=0)\n figure.tight_layout()\n figure.savefig(\"figures/tsnes.png\", dpi=200, bbox_inches=\"tight\", pad_inches=0.0)", "def plots():\n out = interactive_output(generate_plots, {'gsize':gridSlider, 'ra':RABox, 'ra':RASlider, 'dec':DECBox, 'dec':DECSlider, 'ang':radBox, 'ang':radSlider, 'style':hexDrop})\n return display(widgrid, out)", "def output_div(self, output_method):\n instance = self.instance\n G = 
myGraph(instance.view_num)\n for i in range(instance.view_num):\n view = instance.tables[instance.views[i].table_pos].views[instance.views[i].view_pos]\n G.addNode(view)\n G.getSim()\n result = G.getTopK(instance.view_num)\n order = 1\n export_list = []\n if output_method == 'list':\n for item in result:\n export_list.append(G.nodes[item].output(order))\n order += 1\n return export_list\n elif output_method == 'print':\n for item in result:\n pprint (G.nodes[item].output(order))\n order += 1\n return\n elif output_method == 'single_json' or output_method == 'multiple_jsons':\n path2 = os.getcwd() + '/json/'\n if not os.path.exists(path2):\n os.mkdir(path2)\n if output_method == 'single_json':\n f = open(path2 + self.table_name + '.json','w')\n for item in result:\n f.write(G.nodes[item].output(order) + '\\n')\n order += 1\n f.close() # Notice that f.close() is out of the loop to create only one file\n else: #if output_method == 'multiple_jsons'\n for item in result:\n f = open(path2 + self.table_name + str(order)+'.json','w')\n f.write(G.nodes[item].output(order))\n order += 1\n f.close() # Notice that f.close() is in the loop to create multiple files\n return\n elif output_method == 'single_html' or output_method == 'multiple_htmls':\n path2 = os.getcwd() + '/html/'\n if not os.path.exists(path2):\n os.mkdir(path2)\n page = Page()\n if output_method == 'single_html':\n self.page = Page()\n for item in result:\n view = G.nodes[item]\n self.html_output(order, view, 'single')\n order += 1\n self.page.render('./html/' + self.table_name + '_all' + '.html')\n else: # if output_method == 'multiple_htmls'\n path3 = os.getcwd() + '/html/' + self.table_name\n if not os.path.exists(path3):\n os.mkdir(path3)\n for item in result:\n view = G.nodes[item]\n self.html_output(order, view, 'multiple')\n order += 1\n return", "def graphing2():\n return render_template('graph2.html')", "def createGraph(self):\n self.measurements(45,50,10)\n avg = self.readFile(\"avg.pickle\")\n table = []\n for a in avg:\n table.append((a[0], a[1], a[2], a[3], a[4], \"Boolean\"))\n table.append((a[0], a[1], a[2], a[5], a[6], \"Fractional\"))\n table.append((a[0], a[1], a[2], a[7], a[8], \"Hierarchical\"))\n df = pd.DataFrame(table)\n df.columns = [\"nPages\", \"nCentroids\", \"Time\", \"Mean\", \"Std\", \"Type\"]\n print(df)\n sns.set(style = 'darkgrid')\n sns.lmplot(x = \"nCentroids\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.lmplot(x = \"nPages\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.scatterplot(x = \"nCentroids\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n #sns.scatterplot(x = \"nPages\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n plt.show()", "def generate(self):\n\n # Load the required datapoints into memory.\n self._load_results()\n\n # Calculate datapoints statistics, like min. and max. 
values.\n self._calc_stats()\n\n # Generate the plots.\n self._generate_scatter_plots()\n self._generate_histograms()\n\n # Put together the final HTML report.\n self._generate_report()", "def make_htt_plots(input_filename, output_dir):\n in_stem = os.path.splitext(os.path.basename(input_filename))[0]\n output_dir = os.path.join(output_dir, in_stem)\n if not os.path.isdir(output_dir):\n print 'Making output dir', output_dir\n os.makedirs(output_dir)\n\n f = cu.open_root_file(input_filename)\n tree = cu.get_from_file(f, \"valid\")\n\n common_cut = COMMON_CUT\n norm_cut = '1./nMatches' # normalisation, for event-level quantities, since we store it for each match in an event\n if common_cut != '':\n norm_cut += ' && %s' % common_cut\n\n do_htt_plots(tree, output_dir, norm_cut)\n\n do_mht_plots(tree, output_dir, norm_cut)\n\n # Do plots where y axis is some variable of interest\n do_dr_plots(tree, output_dir, common_cut)\n\n do_rsp_plots(tree, output_dir, common_cut)\n\n do_nvtx_plots(tree, output_dir, norm_cut)\n\n do_njets_plots(tree, output_dir, norm_cut)\n\n do_jet_pt_plots(tree, output_dir, common_cut)\n\n f.Close()", "def generate(self, diagram):", "def generate_plots(self, env):\n print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))\n\n # chdir into the working directory\n os.chdir(env['WORKDIR'])\n\n for nclPlotFile in self._ncl:\n # copy the NCL command to the workdir\n shutil.copy2('{0}/{1}'.format(env['NCLPATH'],nclPlotFile), '{0}/{1}'.format(env['WORKDIR'], nclPlotFile))\n\n nclFile = '{0}/{1}'.format(env['WORKDIR'],nclPlotFile)\n rc, err_msg = cesmEnvLib.checkFile(nclFile, 'read')\n if rc:\n try:\n print(' calling NCL plot routine {0}'.format(nclPlotFile))\n subprocess.check_call(['ncl', '{0}'.format(nclFile)], env=env)\n except subprocess.CalledProcessError as e:\n print('WARNING: {0} call to {1} failed with error:'.format(self.name(), nclFile))\n print(' {0}'.format(e.cmd))\n print(' rc = {0}'.format(e.returncode))\n else:\n print('{0}... 
continuing with additional plots.'.format(err_msg))", "def _generate_all_charts(spec, input_data):\n\n def _generate_chart(_, data_q, graph):\n \"\"\"Generates the chart.\n \"\"\"\n\n logs = list()\n\n logging.info(\" Generating the chart '{0}' ...\".\n format(graph.get(\"title\", \"\")))\n logs.append((\"INFO\", \" Generating the chart '{0}' ...\".\n format(graph.get(\"title\", \"\"))))\n\n job_name = graph[\"data\"].keys()[0]\n\n csv_tbl = list()\n res = list()\n\n # Transform the data\n logs.append((\"INFO\", \" Creating the data set for the {0} '{1}'.\".\n format(graph.get(\"type\", \"\"), graph.get(\"title\", \"\"))))\n data = input_data.filter_data(graph, continue_on_error=True)\n if data is None:\n logging.error(\"No data.\")\n return\n\n chart_data = dict()\n chart_tags = dict()\n for job, job_data in data.iteritems():\n if job != job_name:\n continue\n for index, bld in job_data.items():\n for test_name, test in bld.items():\n if chart_data.get(test_name, None) is None:\n chart_data[test_name] = OrderedDict()\n try:\n chart_data[test_name][int(index)] = \\\n test[\"result\"][\"receive-rate\"]\n chart_tags[test_name] = test.get(\"tags\", None)\n except (KeyError, TypeError):\n pass\n\n # Add items to the csv table:\n for tst_name, tst_data in chart_data.items():\n tst_lst = list()\n for bld in builds_dict[job_name]:\n itm = tst_data.get(int(bld), '')\n if not isinstance(itm, str):\n itm = itm.avg\n tst_lst.append(str(itm))\n csv_tbl.append(\"{0},\".format(tst_name) + \",\".join(tst_lst) + '\\n')\n\n # Generate traces:\n traces = list()\n index = 0\n groups = graph.get(\"groups\", None)\n visibility = list()\n\n if groups:\n for group in groups:\n visible = list()\n for tag in group:\n for test_name, test_data in chart_data.items():\n if not test_data:\n logs.append((\"WARNING\",\n \"No data for the test '{0}'\".\n format(test_name)))\n continue\n if tag in chart_tags[test_name]:\n message = \"index: {index}, test: {test}\".format(\n index=index, test=test_name)\n test_name = test_name.split('.')[-1]\n try:\n trace, rslt = _generate_trending_traces(\n test_data,\n job_name=job_name,\n build_info=build_info,\n name='-'.join(test_name.split('-')[2:-1]),\n color=COLORS[index])\n except IndexError:\n message = \"Out of colors: {}\".format(message)\n logs.append((\"ERROR\", message))\n logging.error(message)\n index += 1\n continue\n traces.extend(trace)\n visible.extend([True for _ in range(len(trace))])\n res.append(rslt)\n index += 1\n break\n visibility.append(visible)\n else:\n for test_name, test_data in chart_data.items():\n if not test_data:\n logs.append((\"WARNING\", \"No data for the test '{0}'\".\n format(test_name)))\n continue\n message = \"index: {index}, test: {test}\".format(\n index=index, test=test_name)\n test_name = test_name.split('.')[-1]\n try:\n trace, rslt = _generate_trending_traces(\n test_data,\n job_name=job_name,\n build_info=build_info,\n name='-'.join(test_name.split('-')[2:-1]),\n color=COLORS[index])\n except IndexError:\n message = \"Out of colors: {}\".format(message)\n logs.append((\"ERROR\", message))\n logging.error(message)\n index += 1\n continue\n traces.extend(trace)\n res.append(rslt)\n index += 1\n\n if traces:\n # Generate the chart:\n try:\n layout = deepcopy(graph[\"layout\"])\n except KeyError as err:\n logging.error(\"Finished with error: No layout defined\")\n logging.error(repr(err))\n return\n if groups:\n show = list()\n for i in range(len(visibility)):\n visible = list()\n for r in range(len(visibility)):\n for _ in 
range(len(visibility[r])):\n visible.append(i == r)\n show.append(visible)\n\n buttons = list()\n buttons.append(dict(\n label=\"All\",\n method=\"update\",\n args=[{\"visible\": [True for _ in range(len(show[0]))]}, ]\n ))\n for i in range(len(groups)):\n try:\n label = graph[\"group-names\"][i]\n except (IndexError, KeyError):\n label = \"Group {num}\".format(num=i + 1)\n buttons.append(dict(\n label=label,\n method=\"update\",\n args=[{\"visible\": show[i]}, ]\n ))\n\n layout['updatemenus'] = list([\n dict(\n active=0,\n type=\"dropdown\",\n direction=\"down\",\n xanchor=\"left\",\n yanchor=\"bottom\",\n x=-0.12,\n y=1.0,\n buttons=buttons\n )\n ])\n\n name_file = \"{0}-{1}{2}\".format(spec.cpta[\"output-file\"],\n graph[\"output-file-name\"],\n spec.cpta[\"output-file-type\"])\n\n logs.append((\"INFO\", \" Writing the file '{0}' ...\".\n format(name_file)))\n plpl = plgo.Figure(data=traces, layout=layout)\n try:\n ploff.plot(plpl, show_link=False, auto_open=False,\n filename=name_file)\n except plerr.PlotlyEmptyDataError:\n logs.append((\"WARNING\", \"No data for the plot. Skipped.\"))\n\n data_out = {\n \"job_name\": job_name,\n \"csv_table\": csv_tbl,\n \"results\": res,\n \"logs\": logs\n }\n data_q.put(data_out)\n\n builds_dict = dict()\n for job in spec.input[\"builds\"].keys():\n if builds_dict.get(job, None) is None:\n builds_dict[job] = list()\n for build in spec.input[\"builds\"][job]:\n status = build[\"status\"]\n if status != \"failed\" and status != \"not found\" and \\\n status != \"removed\":\n builds_dict[job].append(str(build[\"build\"]))\n\n # Create \"build ID\": \"date\" dict:\n build_info = dict()\n tb_tbl = spec.environment.get(\"testbeds\", None)\n for job_name, job_data in builds_dict.items():\n if build_info.get(job_name, None) is None:\n build_info[job_name] = OrderedDict()\n for build in job_data:\n testbed = \"\"\n tb_ip = input_data.metadata(job_name, build).get(\"testbed\", \"\")\n if tb_ip and tb_tbl:\n testbed = tb_tbl.get(tb_ip, \"\")\n build_info[job_name][build] = (\n input_data.metadata(job_name, build).get(\"generated\", \"\"),\n input_data.metadata(job_name, build).get(\"version\", \"\"),\n testbed\n )\n\n work_queue = multiprocessing.JoinableQueue()\n manager = multiprocessing.Manager()\n data_queue = manager.Queue()\n cpus = multiprocessing.cpu_count()\n\n workers = list()\n for cpu in range(cpus):\n worker = Worker(work_queue,\n data_queue,\n _generate_chart)\n worker.daemon = True\n worker.start()\n workers.append(worker)\n os.system(\"taskset -p -c {0} {1} > /dev/null 2>&1\".\n format(cpu, worker.pid))\n\n for chart in spec.cpta[\"plots\"]:\n work_queue.put((chart, ))\n work_queue.join()\n\n anomaly_classifications = list()\n\n # Create the header:\n csv_tables = dict()\n for job_name in builds_dict.keys():\n if csv_tables.get(job_name, None) is None:\n csv_tables[job_name] = list()\n header = \"Build Number:,\" + \",\".join(builds_dict[job_name]) + '\\n'\n csv_tables[job_name].append(header)\n build_dates = [x[0] for x in build_info[job_name].values()]\n header = \"Build Date:,\" + \",\".join(build_dates) + '\\n'\n csv_tables[job_name].append(header)\n versions = [x[1] for x in build_info[job_name].values()]\n header = \"Version:,\" + \",\".join(versions) + '\\n'\n csv_tables[job_name].append(header)\n\n while not data_queue.empty():\n result = data_queue.get()\n\n anomaly_classifications.extend(result[\"results\"])\n csv_tables[result[\"job_name\"]].extend(result[\"csv_table\"])\n\n for item in result[\"logs\"]:\n if item[0] == \"INFO\":\n 
logging.info(item[1])\n elif item[0] == \"ERROR\":\n logging.error(item[1])\n elif item[0] == \"DEBUG\":\n logging.debug(item[1])\n elif item[0] == \"CRITICAL\":\n logging.critical(item[1])\n elif item[0] == \"WARNING\":\n logging.warning(item[1])\n\n del data_queue\n\n # Terminate all workers\n for worker in workers:\n worker.terminate()\n worker.join()\n\n # Write the tables:\n for job_name, csv_table in csv_tables.items():\n file_name = spec.cpta[\"output-file\"] + \"-\" + job_name + \"-trending\"\n with open(\"{0}.csv\".format(file_name), 'w') as file_handler:\n file_handler.writelines(csv_table)\n\n txt_table = None\n with open(\"{0}.csv\".format(file_name), 'rb') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=',', quotechar='\"')\n line_nr = 0\n for row in csv_content:\n if txt_table is None:\n txt_table = prettytable.PrettyTable(row)\n else:\n if line_nr > 1:\n for idx, item in enumerate(row):\n try:\n row[idx] = str(round(float(item) / 1000000, 2))\n except ValueError:\n pass\n try:\n txt_table.add_row(row)\n except Exception as err:\n logging.warning(\"Error occurred while generating TXT \"\n \"table:\\n{0}\".format(err))\n line_nr += 1\n txt_table.align[\"Build Number:\"] = \"l\"\n with open(\"{0}.txt\".format(file_name), \"w\") as txt_file:\n txt_file.write(str(txt_table))\n\n # Evaluate result:\n if anomaly_classifications:\n result = \"PASS\"\n for classification in anomaly_classifications:\n if classification == \"regression\" or classification == \"outlier\":\n result = \"FAIL\"\n break\n else:\n result = \"FAIL\"\n\n logging.info(\"Partial results: {0}\".format(anomaly_classifications))\n logging.info(\"Result: {0}\".format(result))\n\n return result", "def generateHtml(self):\n # only the master processor needs to do this\n if not self.master: return\n\n for page in self.layout.pages:\n \n # build the metric dictionary\n metrics = {}\n page.models = []\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n mname = dataset.getncattr(\"name\")\n if mname != \"Benchmark\": page.models.append(mname)\n if not dataset.groups.has_key(page.name): continue\n group = dataset.groups[page.name]\n\n # if the dataset opens, we need to add the model (table row)\n metrics[mname] = {}\n \n # each model will need to have all regions\n for region in self.regions: metrics[mname][region] = {}\n \n # columns in the table will be in the scalars group\n if not group.groups.has_key(\"scalars\"): continue\n \n # we add scalars to the model/region based on the region\n # name being in the variable name. 
If no region is found,\n # we assume it is the global region.\n grp = group.groups[\"scalars\"]\n for vname in grp.variables.keys():\n found = False\n for region in self.regions:\n if region in vname: \n found = True\n var = grp.variables[vname]\n name = vname.replace(region,\"\")\n metrics[mname][region][name] = Variable(name = name,\n unit = var.units,\n data = var[...])\n if not found:\n var = grp.variables[vname]\n metrics[mname][\"global\"][vname] = Variable(name = vname,\n unit = var.units,\n data = var[...])\n page.setMetrics(metrics)\n \n # write the HTML page\n f = file(os.path.join(self.output_path,\"%s.html\" % (self.name)),\"w\")\n f.write(str(self.layout))\n f.close()", "def print_graph() -> None:\n raise NotImplementedError", "def gen_graph():\n if config_pagination:\n gdata = tgraph.call_graph(offset=offset, limit=limit)\n else:\n gdata = tgraph.call_graph(start=start, end=end, contineous=contineous)\n\n for data in gdata:\n yield data", "def main(self):\n if self.mode==0: #drawing\n self.draw()\n self.graph_drawing=self.cleanGraph(self.graph_drawing)\n #if len(self.graph_drawing)>1:\n # self.function_interpolation=self.polynomialInterpolation2D(self.graph_drawing,1)\n # self.graph_interpolation=self.sample(self.function_interpolation,len(self.graph_drawing))\n elif self.mode==1: #construction\n self.step+=1\n self.time=self.step/self.max_step\n if self.step>self.max_step:\n self.mode=2\n #self.graph_construction=self.discreteComplexComposeGraph(self.coefficients,self.time) #complex now\n self.graph_construction=self.numpyComposeConstructionGraph(self.coefficients,t=self.time)\n self.vectors=self.getVectors([(0,0)]+self.graph_construction)\n self.graph_display.append(self.graph_construction[-1])\n\n elif self.mode==2:\n self.draw()", "def graphs_kelly():\n return render_template(\"graphs-Kelly.html\")", "def graph():\n port_to_csv()\n\n source = ''\n if request.form.get('GraphType', '') == '':\n source = url_for('static', filename='frog no graph.png')\n else:\n source = s_modular(request.form.get('GraphType', ''), '')\n\n return render_template(\n 'tmpGraph.jade',\n title=\"Graph\",\n year=datetime.now().year,\n src=source\n )", "def visualize(stuff, **options):\n separate = r\"\\newpage\" #by default, a new tupel is put on a new page\n name = \"some_text_file\" #by default this file is used\n for key in options:\n if key == \"separate\":\n separate = options[key]\n if key == \"name\":\n name = options[key]\n works = True\n totallines = [r\"\\documentclass{article}\", r\"\\usepackage{xcolor}\", r\"\\usepackage{tikz,pgf}\", r\"\\usepackage[left = 0 cm, top = 0cm, bottom = 0cm, right = 2cm]{geometry}\", r\"\\begin{document}\", r\"\\pagestyle{empty}\"]\n for description in stuff:\n data = stuff[description]\n if checkdataformat(description, data):\n if description == \"config\":\n lines = gentikz(data)\n elif description == \"movelist\":\n lines = showmoveslist(data[0], data[1], data[2])\n elif description == \"movelists\":\n lines = compareshowmoveslists(data[0], data[1], data[2])\n elif description == \"list\":\n lines = showlist(data)\n elif description == \"configurations\":\n lines = showconfigurations(data)\n elif description == \"movetable\":\n lines = nktable(data[0], data[1], sort = 'value')\n elif description == \"incrementtable\":\n lines = nktable(data[0], data[1], sort = 'increment')\n elif description == \"totalptable\":\n lines = nktable(data[0], data[1], sort = 'totalpossibilities')\n elif description == \"ptable\":\n lines = nktable(data[0], data[1], sort = 
'adjustedpossibilities')\n elif description == \"bfptable\":\n lines = nktable(data[0], data[1], sort = 'bfadjustedpossibilities')\n else:\n print(\"unknown description\")\n lines = []\n for line in lines:\n totallines.append(line)\n totallines.append(separate)\n else:\n print(description, \":\", data, \"don't match, please read help(visualization)\")\n works = False\n totallines.append(r\"\\end{document}\")\n if works:\n compile(totallines, name)", "def show_graphs(self):\n self.frequency_plot_graph.show()\n self.resistance_graph.show()\n self.temperature_plot_graph.show()\n self.pressure_plot_graph.show()\n self.humidity_plot_graph.show()\n self.overview_graph.show()\n self.overview_graph.setXRange(-1000, 5000)", "def graphing1():\n return render_template('graph1.html')", "def make_final_graph(base_dir=DEFAULT_BASE_DIR,\n start_run=0, end_run=100):\n plt.style.use('default')\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif') # sans-\n plt.rcParams.update({'font.size': 16,\n 'font.serif' : ['Computer Modern Roman']})\n plt.figure(1, figsize=(8, 7))\n pos = {4: 221, 2: 222, 1: 223, 0:224}\n for i, _ in [(4, 10000), (2, 25), (1, 5), (0, 1)]:\n out_folder_list = [\"{}/exp_{}/run_{:02d}\".format(base_dir, i, j)\n for j in range(start_run, end_run)]\n res_dict = dict()\n\n for out_folder in out_folder_list:\n p_learn = json.load(open(\n \"{}/dynamics.json\".format(out_folder), \"rt\"))\n\n # Convert to array to make everything plottable.\n for k in p_learn:\n if k.endswith(\"AUC\"):\n p_learn[k] = np.array(p_learn[k])\n if k in res_dict:\n res_dict[k].append(p_learn[k])\n else:\n res_dict[k] = [p_learn[k]]\n\n out_folder_plot = \"/\".join(out_folder_list[0].split(\"/\")[:-1])\n plt.subplot(pos[i])\n me.plot_quantiles(res_dict, out_folder_plot, \"quantile\",\n pos=pos[i]%10, saveit=False)\n plt.savefig(\"cumul_shuttle_exp.pdf\")", "def build_graph(self):\n pass", "def draw_all_plots(self):\n\n plot_names = []\n e = self.find_di_tri(self.lang_found)\n letter_dct = e[1]\n di_dct = e[2]\n tri_dct = e[3]\n\n plot_name = self.lang_found + '_letters'\n self.wykres(letter_dct, 'Wyres liter', 'litera', plot_name, 0)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_digram'\n self.wykres(di_dct, 'Wykres digramów', 'digram', plot_name, 1)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_trigram'\n self.wykres(tri_dct, 'Wykres trigramów', 'trigram', plot_name, 2)\n plot_names.append(plot_name)\n\n for cnt, plt_scn in enumerate(self.plot_scenes):\n pic = QtGui.QPixmap(self.img_dir + '/' + plot_names[cnt] + \".png\")\n plt_scn.setPixmap(pic.scaled(427, 320, Qt.KeepAspectRatio))", "def main():\n\tplt.clf()\n\taxes = setup_axes()\n\tplot_output(axes, \"../../simulations/default\", \"black\")\n\tplot_output(axes, \"../../simulations/yccsr_zero\", \"crimson\")\n\tplot_output(axes, \"../../simulations/yccsr_linear\", \"lime\")\n\tplot_output(axes, \"../../simulations/yccsr_1-exp\", \"deepskyblue\")\n\tvisuals.plot_track_points_intervals(axes[0],\n\t\tvice.history(\"../../simulations/default\"), element = \"Sr\",\n\t\treference = \"Fe\")\n\tplot_legend(axes[1])\n\tplt.tight_layout()\n\tvisuals.yticklabel_formatter(axes[1])\n\tplt.savefig(sys.argv[1])\n\tplt.clf()", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def _makeGraphs(cnames=(\"cnt\",\"temp\"), finame=\"graf.png\"):\n colors=[\"660000\",\"ff0000\", \"770000\"]\n if len(cnames)==1: finame= cnames[0]+'.png'\n ri= 
open(\"graph.txt\",\"w\")\n #ri.write(\"graph graf.png --start %d -e %d --step 60 -w 600 \"%\n # (time0, time0+60*60))\n #ri.write(\"graph graf.png -s teatime --step 60 -w 600 \")\n #ri.write(\"graph graf.png -s 17:55 --step 60 -w 600 \") # -10 hours max.\n # time: -s now-10h -s 1:0 -e 4:0\n #ri.write(\"graph graf.png -s now-10h --step 60 -w 600 \")\n ri.write(\"graph \"+finame+\" -s now-2d --step 60 -w 600 \")\n ix=0\n while ix<len(cnames):\n cn=cnames[ix]\n ri.write(\"DEF:%s=%s:%s:AVERAGE \"% (cn, RRDDB, cn))\n ix=ix+1\n ix=0\n while ix<len(cnames):\n cn=cnames[ix]\n ri.write(\"LINE1:%s#%s:%s \"%(cn,colors[ix],cn))\n ix=ix+1\n ri.close()\n os.system(\"rrdtool - <graph.txt\")", "def _PlotGraph(self, event):\n self._rcvLock.acquire()\n for j in event.data[0].keys():\n data = event.data[0][j]\n #print data\n line = []\n for k in data.keys():\n if k in COLORS.keys():\n c = COLORS[k]\n else:\n c = 'black'\n line.append(plot.PolyLine(data[k], colour=c, width=1,\n legend=\"Node %d\"%(k,)))\n # To draw markers: default colour = black, size = 2\n # shapes = 'circle', 'cross', 'square', 'dot', 'plus'\n #marker = plot.PolyMarker(event.data[1], marker='triangle')\n\n # set up text, axis and draw\n if j == ERRORPLOT:\n t = \"Synchronization Error\"\n xa = \"Time [s]\"\n ya = \"Error [ms]\"\n elif j == TEMPPLOT:\n t = \"Temperature Index\"\n xa = \"Time [s]\"\n ya = \"Index\"\n elif j == SKEWPLOT:\n t = \"Frequency Error\"\n xa = \"Time [s]\"\n ya = \"Frequency Error [ppm]\"\n gc = plot.PlotGraphics(line, t, xa, ya)\n # Draw graphs for each plot\n self.plotter[j].Draw(gc, xAxis=(self._x_lower,\n self._x_upper), yAxis=(float(self._y_lower[j]),\n float(self._y_upper[j])))\n self._rcvLock.release()", "def plot_global(type):\n click.echo(click.style(\n \"Generating Plot....\", fg='cyan', bold='true'))\n plot_time_series.TimeSeriesPloTs.plot_global(type)\n click.echo(click.style(\n \"Done....\", fg='green', bold='true'))", "def show_custom_graph(self):\n pass", "def get_visualizations( self, dataset ):\n\n return [ 'phyloviz' ]", "def main(showSamples=True, showConfusion=True):\n ndigit = 10\n elambda = [0.4, 0.6, 0.8]\n for i in elambda:\n test(ndigit, i, showSamples, showConfusion)\n if showSamples:\n pltmulti('graphs.pdf')", "def get(self):\n from fantasm.utils import outputMachineConfig\n\n machineConfig = getMachineConfig(self.request)\n content = outputMachineConfig(machineConfig, skipStateNames=[self.request.GET.get('skipStateName')])\n if self.request.GET.get('type', 'png') == 'png':\n self.response.out.write(\n \"\"\"\n <html>\n <head></head>\n <body onload=\"javascript:document.forms.chartform.submit();\">\n <form id='chartform' action='http://chart.apis.google.com/chart' method='POST'>\n <input type=\"hidden\" name=\"cht\" value=\"gv:dot\" />\n <input type=\"hidden\" name=\"chl\" value='%(chl)s' />\n <input type=\"submit\" value=\"Generate GraphViz .png\" />\n </form>\n </body>\n \"\"\" % {'chl': content.replace('\\n', ' ')})\n else:\n self.response.out.write(content)", "def sixteen_graphs(the_dir):\n # TODO change to deprecation warning\n warnings.warn(\"Does not call sv_pipeline functoins correctly\", DeprecationWarning)\n\n plb.rcParams['figure.figsize'] = 30, 30\n plt.clf()\n plt.figure(1)\n\n # should look like: read_data/all_files/chr4_124,017,492_124,029,032_merged.txt\n merged_files = glob.glob(the_dir + '*merged.txt')\n print(\"Running for {} regions\".format(len(merged_files)))\n for merged_filename in merged_files:\n # get filenames\n prefix = merged_filename[len(the_dir):-11]\n 
fasta_filename = the_dir + prefix + \".fa\"\n bed_filename = the_dir + prefix + \"-refcoords.bed\"\n print('Using ' + prefix)\n\n for min_matching_length in range(100, 1700, 100):\n print(min_matching_length)\n # used for ground truth\n preset, postset, spanset, gapset = get_read_classifications(prefix,\\\n bed_filename, merged_filename=merged_filename)\n # Generate and prune graph\n graph = generate_graph(prefix, fasta_filename, min_matching_length)\n graph = nx_helpers.remove_nodes(graph, preset)\n graph = nx_helpers.remove_nodes(graph, postset)\n\n # Plot the graph\n plt.subplot(4, 4, min_matching_length/100)\n communities = nx_helpers.get_communities(graph)\n graph, communities = drop_small_communities(graph, communities)\n node_colors = node_community_colors(graph, communities)\n pos = nx.spring_layout(graph)\n title = \"Chr {0};\\n L={1}; NumCom={2}\\nComQual = {3}, MapQual={4}\"\\\n .format(prefix, min_matching_length, len(communities),\\\n community_quality(communities, spanset, gapset),\\\n mapping_quality(graph, spanset, gapset))\n nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n plt.title(title)\n plt.savefig(\"figs/\" + prefix + '-16-communities.pdf')\n plt.clf()", "def generateGraph(mids, chaptersField, labelsField):\n output = \"digraph G { \\n\"\n # On ne traite que les chapitres qui ont actives le graphe\n chapts = chapters.graphChapters()\n # le dico nodes contient une liste pour chaque chapitre. Chaque liste\n # contient tous les neuds (un par note) presents dans ce chapitre, et\n # representes par des tuples (noteId, label)\n nodes = {}\n for mid in mids:\n chapterField = chaptersField[mid]\n labelField = labelsField[mid]\n for id, flds in mw.col.db.execute(\"\"\"\n SELECT id, flds FROM notes WHERE mid=%d\n \"\"\" % mid):\n fields = splitFields(flds)\n chapter = fields[chapterField]\n if not chapter in chapts:\n continue\n label = fields[labelField]\n if(not chapter in nodes):\n nodes[chapter] = []\n nodes[chapter].append((id, label))\n # On genere les noeuds, dans des clusters (un par chapitre)\n notes = []\n for chap in nodes:\n output += \"\"\"subgraph cluster_%d {\n node [style=filled];\n label = \"%s\";\n color=blue;\n \"\"\" % (chapts[chap], chap)\n for n in nodes[chap]:\n output += \"\"\"n%d [label=\"%s\", URL=\"%d\"];\\n\"\"\" % (n[0], n[1], n[0])\n notes.append(n)\n output += \"\"\"\n }\\n\"\"\"\n # Puis on ajoute tous les liens ..\n for n in notes:\n for nid in mw.col.db.execute(\"\"\"SELECT N.noteId FROM `PATH.links` AS L\n JOIN `PATH.match` AS M ON M.id = L.matchId\n JOIN `PATH.nodes` AS N ON M.nodeId = N.id\n WHERE L.noteId = %d\"\"\" % (n[0])):\n output += \"\"\"n%d -> n%d;\\n\"\"\" % (nid[0], n[0])\n output += \"}\"\n generateGraphImage(output)", "def show_graph(self, output_fmt='pdf', direction = 'BT'):\n from PsyNeuLink.Components.Mechanisms.ProcessingMechanisms.ObjectiveMechanism import ObjectiveMechanism\n from PsyNeuLink.Components.Mechanisms.AdaptiveMechanisms.LearningMechanisms.LearningMechanism import LearningMechanism\n \n import graphviz as gv\n\n system_graph = self.graph\n learning_graph=self.learningGraph\n \n # build graph and configure visualisation settings\n G = gv.Digraph(engine = \"dot\", \n node_attr = {'fontsize':'12', 'fontname': 'arial', 'shape':'oval'}, \n edge_attr = {'arrowhead':'halfopen', 'fontsize': '10', 'fontname': 'arial'},\n graph_attr = {\"rankdir\" : direction} )\n \n # work with system graph\n rcvrs = list(system_graph.keys())\n # loop through receivers\n for rcvr in rcvrs:\n if isinstance(rcvr[0], 
ObjectiveMechanism) or isinstance(rcvr[0], LearningMechanism):\n continue\n rcvr_name = rcvr[0].name\n rcvr_shape = rcvr[0].variable.shape[1]\n rcvr_label = \" {} ({}) \".format(rcvr_name, rcvr_shape)\n \n # loop through senders\n sndrs = system_graph[rcvr]\n for sndr in sndrs:\n sndr_name = sndr[0].name\n sndr_shape = sndr[0].variable.shape[1]\n sndr_label = \" {} ({}) \".format(sndr_name, sndr_shape)\n \n # find edge name\n projs = sndr[0].outputState.sendsToProjections\n for proj in projs:\n if proj.receiver.owner == rcvr[0]:\n edge_name = proj.name\n edge_shape = proj.matrix.shape\n edge_label = \" {} {} \".format(edge_name, edge_shape)\n G.edge(sndr_label, rcvr_label, label = edge_label)\n \n if output_fmt == 'pdf':\n G.view(self.name.replace(\" \", \"-\"), cleanup=True)\n elif output_fmt == 'jupyter':\n return G", "def generate_plots(self):\n freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}\n data_axes = None\n for index, frequency in enumerate(sorted(freq_to_channel)):\n channel = freq_to_channel[frequency]\n td_f = self.frequency_dict[channel]\n title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)\n data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,\n self.min_db, self.max_db)\n\n if data_axes:\n self._display_x_labels(self.ax[2], self.data_times)\n self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])\n self._display_colorbar(self.fig, data_axes)", "def return_figures():\n\n graph_one = []\n df = cleanparrisdf('data/Salem-Village-Data-Set.csv')\n sources = [0,0,0,1,1,1]\n targets = [2,3,4,2,3,4]\n values = df[\"petition_count\"].tolist()\n\n data_one = dict(\n type = 'sankey',\n node = dict(\n pad = 10,\n thickness = 30,\n line = dict(\n color = \"black\",\n width = 0.5\n ),\n label = [\"Church Member\", \"Non-Church Member\", \"Anti-Parris Signatory\", \"Non-Signatory\", \"Pro-Parris Signatory\"],\n color = [\"red\", \"blue\", \"black\", \"grey\", \"white\"]\n ),\n link = dict(\n source = sources,\n target = targets,\n value = values\n ))\n\n layout_one = dict(\n title = 'Salem Residents\\' Stance on Minister Samuel Parris in 1695'\n )\n\n# second chart plots ararble land for 2015 as a bar chart\n graph_two = []\n df = cleantimelinedf('data/Accused-Witches-Data-Set.csv')\n x_val = df[\"month\"].tolist()\n y_val1 = df[\"accusation_count\"].tolist()\n y_val2 = df[\"execution_count\"].tolist()\n\n graph_two.append(\n go.Scatter(\n x = x_val,\n y = y_val1,\n mode = 'lines+markers',\n name = \"People Accused of Witchcraft\"\n )\n )\n graph_two.append(\n go.Scatter(\n x = x_val,\n y = y_val2,\n mode = 'lines+markers',\n name = \"People Executed for Witchcraft\"\n )\n )\n\n labels = [\"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\"]\n\n layout_two = dict(title = 'Salem Witch Trial Victim Count Over Time',\n xaxis = dict(title = 'Month (1692)', tickvals=[k+2 for k in range(len(labels))], ticktext=labels, tickangle=315),\n yaxis = dict(title = 'Number of People'),\n )\n\n\n# third chart plots percent of population that is rural from 1990 to 2015\n graph_three = []\n df = cleanplacesdf('data/Accused-Witches-Data-Set.csv')\n graph_three.append(\n go.Scattergeo(\n lon = df['long'],\n lat = df['lat'],\n text = df['text'],\n marker = dict(\n size = df['places_count'],\n sizeref = 2. 
* max(df['places_count'])/100,\n color = 'red',\n line = dict(width = 0 )\n )\n )\n )\n\n layout_three = dict(\n title = 'Towns Affected (Bubbles Proportional to Number Accused)',\n geo = dict(\n showframe = False,\n projection=dict( type='orthographic' ),\n showland = True,\n oceancolor = 'rgb(204, 255, 255)',\n showocean= True,\n landcolor = 'rgb(229, 255, 204)',\n lonaxis = dict( range= [-71.7 , -70.3] ),\n lataxis = dict( range= [42.3, 43.5] )\n )\n )\n\n figures = []\n figures.append(dict(data=[data_one], layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n figures.append(dict(data=graph_three, layout=layout_three))\n\n return figures", "def generate_plots(self, input_data, input_labels=None):\n pass", "def plot(self):\n pass", "def _build_graph(self):\n pass", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def PlotGraph(obj):\n\n generated_text = \"\\n\\n\\nclass PlotGraph():\"\n\n # get the parameters needed from the object\n expression = obj[\"expression\"]\n title = obj[\"name\"] + \" Graph\"\n graphColor = \"b\"\n scatter = False\n\n # optional parameters\n if obj[\"title\"]:\n title = obj[\"title\"] # should be written more concisely in python 3.8\n\n if obj[\"graphColor\"]:\n graphColor = obj[\"graphColor\"] # should be written more concisely in python 3.8\n\n if obj[\"scatter\"]:\n scatter = obj[\"scatter\"] # should be written more concisely in python 3.8\n\n # CONSTRUCTOR\n # def __init__(self, start, stop, num_samples, title=\"example\"):\n generated_text += \"\\n\\tdef __init__(self, start, stop, num_samples, title=\\\"{}\\\"): \".format(title)\n generated_text += \"\\n\\t\\tself.function = \\\"\\\"\"\n generated_text += \"\\n\\t\\tself.title = title\"\n generated_text += \"\\n\\t\\tself.X = np.linspace(start, stop, num_samples)\"\n generated_text += \"\\n\\t\\tself.Y = []\"\n\n # f()\n generated_text += \"\\n\\n\\tdef f(self):\"\n generated_text += \"\\n\\t\\tself.Y = [self.compute(x) for x in self.X]\"\n\n # compute()\n generated_text += \"\\n\\n\\tdef compute(self, x):\"\n generated_text += \"\\n\\t\\treturn np.sin(x)\"\n\n # plot()\n generated_text += \"\\n\\n\\tdef plot(self, scatter=False, color='{}'):\".format(graphColor)\n generated_text += \"\\n\\t\\tplt.figure(1)\\n\\t\\tplt.title(self.title)\"\n generated_text += \"\\n\\t\\tif scatter:\"\n generated_text += \"\\n\\t\\t\\tplt.scatter(self.X, self.Y, c=color)\\n\\t\\t\\treturn\"\n generated_text += \"\\n\\t\\tplt.plot(self.X, self.Y, c=color)\"\n\n # show()\n generated_text += \"\\n\\n\\tdef show(self):\"\n generated_text += \"\\n\\t\\tplt.show()\"\n\n # call()\n generated_text += \"\\n\\n\\tdef call(self):\"\n generated_text += \"\\n\\t\\tself.f()\"\n generated_text += \"\\n\\t\\tself.plot()\"\n generated_text += \"\\n\\t\\tself.show()\"\n\n #print(generated_text)\n return generated_text", "def main():\n\n if not os.path.exists( os.path.join(os.getcwd(), 'Plots') ):\n os.mkdir('Plots')\n\n # Initialise the canvas and set aesthetics\n canv = TCanvas(\"canv\", \"canv\", 800, 600)\n canv.SetLogy()\n gStyle.SetOptStat(0)\n gStyle.SetOptTitle(0)\n\n # Initialise legend and set colours\n leg_height = len(models) * 0.06 # make y-length of legend dependent on n_models\n myLeg = TLegend(0.6, 0.9 - leg_height, 0.9, 0.9)\n myLeg.SetTextSize(0.02)\n\n # Initialise histogram 
arrays\n nJetHist = [None] * len(models)\n jetPtHist = [None] * len(models)\n leadJetPtHist = [None] * len(models)\n metPtHist = [None] * len(models)\n dPhiJJHist = [None] * len(models)\n\n # x-axis labels for plots\n nJetLabel = \"#it{n}_{jet}\"\n jetPtLabel = \"#it{p}_{T}^{jet}\"\n leadJetPtLabel = \"#it{p}_{T}^{j_{1}}\"\n metPtLabel = \"#it{E}_{T}^{miss}\"\n dPhiJJLabel = \"#Delta#it{#phi}_{j_{1} j_{2}}\"\n\n # Initialise histograms here so I can use them later\n for i, model in enumerate(models):\n nJetHist[i] = TH1F(\"nJet\"+model, \"nJet dist \"+model, 30, 0, 29)\n jetPtHist[i] = TH1F(\"jetPt\"+model, \"Jet pT dist \"+model, 30, 0, 3000)\n leadJetPtHist[i] = TH1F(\"leadJetPt\"+model, \"Lead jet pT dist \"+model, 30, 0, 3000)\n metPtHist[i] = TH1F(\"met\"+model, \"MET dist \"+model, 30, 0, 3000)\n dPhiJJHist[i] = TH1F(\"dPhijj\"+model, \"DPhi dist \"+model, 20, -1*(pi+0.1), pi+0.1)\n \n\n # Open root files, then draw individual histograms\n for i, model in enumerate(models):\n print Fore.MAGENTA + \"Running over model {0}/{1}.\".format(i+1, len(models))\n openFile = TFile(files[i])\n tree = openFile.Get(\"Events\")\n nEntries = tree.GetEntries()\n\n # Initialise progress bar\n widgets = [Percentage(), Bar('>'), ETA()]\n pbar = ProgressBar(widgets = widgets, maxval = nEntries).start() \n\n for entry in xrange(nEntries):\n treeEntry = tree.GetEntry(entry)\n nJetHist[i].Fill(tree.nJet)\n \n for jet in xrange( len(tree.Jet_pt) ):\n jetPtHist[i].Fill(tree.Jet_pt[jet])\n\n if len(tree.Jet_pt) > 0: leadJetPtHist[i].Fill(tree.Jet_pt[0])\n metPtHist[i].Fill(tree.MET_pt)\n\n if len(tree.Jet_phi) >= 2:\n deltaPhi = tree.Jet_phi[0] - tree.Jet_phi[1]\n dPhiJJHist[i].Fill(deltaPhi) \n\n pbar.update(entry+1)\n \n pbar.finish()\n\n # Normalise histograms\n nJetHist[i].Scale(1./nEntries)\n jetPtHist[i].Scale(1./nEntries)\n leadJetPtHist[i].Scale(1./nEntries)\n metPtHist[i].Scale(1./nEntries)\n dPhiJJHist[i].Scale(1./nEntries)\n\n # Draw individual histograms and save\n drawIndivHistos(model, nJetHist[i], canv, myLeg, nJetLabel, \"nJet\", index=i)\n drawIndivHistos(model, jetPtHist[i], canv, myLeg, jetPtLabel, \"jetPT\", index=i)\n drawIndivHistos(model, leadJetPtHist[i], canv, myLeg, leadJetPtLabel, \"leadJetPT\", index=i)\n drawIndivHistos(model, metPtHist[i], canv, myLeg, metPtLabel, \"MET\", index=i)\n drawIndivHistos(model, dPhiJJHist[i], canv, myLeg, dPhiJJLabel, \"dPhi\", index=i)\n \n\n # Draw histograms for different models overlaid\n drawMultipleHistos(nJetHist, canv, myLeg, nJetLabel, \"nJet\")\n drawMultipleHistos(jetPtHist, canv, myLeg, jetPtLabel, \"jetPT\")\n drawMultipleHistos(leadJetPtHist, canv, myLeg, leadJetPtLabel, \"leadJetPT\")\n drawMultipleHistos(metPtHist, canv, myLeg, metPtLabel, \"MET\")\n drawMultipleHistos(dPhiJJHist, canv, myLeg, dPhiJJLabel, \"dPhi\")", "def draw_graph(self):\n\t\tif None in self.graph:\n\t\t\tdel self.graph[None]\n\n\t\tfor vs in self.graph.itervalues():\n\t\t\tto_delete = []\n\t\t\tfor i in xrange(len(vs)):\n\t\t\t\tif vs[i] is None:\n\t\t\t\t\tto_delete.append(i)\n\n\t\t\tfor i in reversed(to_delete):\n\t\t\t\tdel vs[i]\n\n\t\tself.G=nx.Graph(self.graph)\n\n\t\tfor k,v in self.labels.iteritems():\n\t\t\tif v[:6] == 'Module':\n\t\t\t\troot = k\n\t\t\t\tbreak\n\n\t\treturn self.__dfs_plot(root)", "def outputs(self):\n pass", "def output(self):\n\t\t# Sort graph nodes by id\n\t\tnodes = list(self.nodes.values())\n\t\tnodes.sort(key=lambda n:n.id)\n\n\t\tfor n in nodes:\n\t\t\t# Get all edges\n\t\t\tedges = []\n\t\t\tfor edge in n.neighbours:\n\t\t\t\tfor 
neighbour in n.get_neighbours(edge):\n\t\t\t\t\tedges.append((neighbour.id, edge))\n\t\t\tedges.sort()\n\n\t\t\t# Format edges\n\t\t\tformatted = []\n\t\t\tfor edge in edges:\n\t\t\t\tformatted.append(\"%s:%s\" % (edge[0], edge[1] or \"\"))\n\n\t\t\t# Print format\n\t\t\tprint(\"%s [%s]\" % (n, \", \".join(formatted)))", "def render(self, chart):\n chart.create_visualization_files(self.__outputpath)", "def displayGraph(self):\n self.dto.displayVerticalGraph()\n print(\"Vertical Bar Graph displayed.\")", "def plot_novelty_results():\n RESULTS_DIR = \"experiments\"\n STDOUT_FILE = \"log.txt\"\n REWARD_PATTERN = r\" EpRewMean[ ]*\\| ([0-9\\-\\.]+)\"\n TIMESTEP_PATTERN = r\" TimestepsSoFar[ ]*\\| ([0-9\\-\\.]+)\"\n ITERATION_PATTERN = r\" Iteration ([0-9]+)\"\n\n GLOBS = [\n os.path.join(RESULTS_DIR, \"novelty_DeceptivePointEnv-v0_es_*\"),\n os.path.join(RESULTS_DIR, \"novelty_DeceptivePointEnv-v0_nsres_*\"),\n os.path.join(RESULTS_DIR, \"novelty_DeceptivePointEnv-v0_nsresgaussian_*\"),\n os.path.join(RESULTS_DIR, \"novelty_DeceptivePointEnv-v0_nsressupervector_*\")\n ]\n\n COLORS = [\n \"C0\",\n \"C1\",\n \"C2\",\n \"C3\"\n ]\n\n LEGENDS = [\n \"ES\",\n \"NSR-ES (Terminal)\",\n \"NSR-ES (Gaussian)\",\n \"NSR-ES (Supervector)\"\n ]\n\n fig = pyplot.figure(figsize=[4.8, 4.8])\n\n for glob_pattern, legend, color in zip(GLOBS, LEGENDS, COLORS):\n experiment_paths = glob(glob_pattern)\n if len(experiment_paths) == 0:\n raise ValueError(\n \"Looks like there are no novelty experiments. Please see README.md on \"+\n \"running novelty search before plotting. Alternatively comment out call to `plot_novelty_results()`.\"\n )\n # Collect all lines and average over later\n xs = []\n ys = []\n for experiment_path in experiment_paths:\n # We just parse results from stdout file\n stdout_log = open(os.path.join(experiment_path, STDOUT_FILE), encoding=\"utf-8\").read()\n # Take maximum fitness of each generation.\n # We have only one printout for one result\n mean_rewards = list(map(float, re.findall(REWARD_PATTERN, stdout_log)))\n iteration = []\n max_rewards = []\n # Plot elite results\n for mean_reward in mean_rewards:\n max_reward = mean_reward\n if len(max_rewards) > 0:\n max_reward = max(max(max_rewards), max_reward)\n max_rewards.append(max_reward)\n iteration.append(len(max_rewards))\n\n xs.append(iteration)\n ys.append(max_rewards)\n\n # Average over curves\n xs = np.array(xs)\n ys = np.array(ys)\n average_x, average_y, std_y, lower_y, upper_y = interpolate_and_average(xs, ys, confidence_interval=True)\n\n pyplot.plot(average_x, average_y, c=color, label=legend)\n pyplot.fill_between(\n average_x,\n lower_y,\n upper_y,\n alpha=0.2,\n color=color,\n linewidth=0.0\n )\n\n pyplot.tick_params(axis='both', which='both', labelsize=\"x-large\")\n pyplot.grid(alpha=0.2)\n pyplot.xlabel(\"Generation\", fontsize=\"x-large\")\n pyplot.ylabel(\"Average Return\", fontsize=\"x-large\")\n pyplot.legend(prop={\"size\": \"large\"})\n pyplot.tight_layout()\n pyplot.savefig(\"figures/novelty_results.pdf\", bbox_inches=\"tight\", pad_inches=0.0)", "def basic_stats_and_plots():\n \n basename = sys.argv[1]\n ops = (\"two_opt\", \"twoh_opt\", \"three_opt\", \"three_opt_broad\", \"swap\", \"swap_adj\")\n opfs = {\n \"two_opt\": tsp.two_opt,\n \"twoh_opt\": tsp.twoh_opt,\n \"three_opt\": tsp.three_opt,\n \"three_opt_broad\": tsp.three_opt_broad,\n \"swap\": tsp.swap_two,\n \"swap_adj\": tsp.swap_adj\n }\n \n lengths = range(6, 11)\n for length in lengths:\n stddev = []\n gini = []\n nneighbours = []\n prop_unique = []\n for 
op in ops:\n filename = os.path.join(basename,\n \"tsp_length_%d_%s\" % (length, op),\n \"TP_row0.dat\")\n print op, length\n x = np.genfromtxt(filename)\n # stats to get:\n stddev.append(np.std(x))\n gini.append(random_walks.gini_coeff(x))\n nneighbours.append(np.sum(x > 0))\n mu, sigma = rw_experiment_with_op(length, opfs[op])\n prop_unique.append((mu, sigma))\n\n gini_barchart(length, gini, ops)\n stddev_barchart(length, stddev, ops)\n plot_gini_v_nneighbours(length, gini, nneighbours, ops)\n plot_stddev_v_nneighbours(length, stddev, nneighbours, ops)\n plot_gini_v_prop_unique(length, gini, prop_unique, ops)\n plot_stddev_v_prop_unique(length, stddev, prop_unique, ops)", "def render(self, filename):\n lines = []\n for name in self._name_to_graph:\n text_graph = self._name_to_graph[name].render()\n lines.append(text_graph)\n if filename is None:\n print(\"\\n\".join(lines))\n else:\n with open(filename, \"w\") as out_file:\n out_file.write(\"\\n\".join(lines))", "def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2", "def show_graph(self):\n graph_file = self.dump_graph()\n subprocess.check_output(shlex.split(f'gwenview {graph_file}'))", "def write_graph(self):\n if self.ax != None:\n # Calculate the spacing of the y-tick labels:\n labels = getattr( self, 'labels', [] )\n height_per = self.ax.get_position().get_points()[1][1]\n height_inches = self.fig.get_size_inches()[-1] * height_per\n height_pixels = self.fig.get_dpi() * height_inches\n max_height_labels = height_pixels / max( 1, len(labels) )\n\n # Adjust the font height to match the maximum available height\n font_height = max_height_labels * 1.7 / 3.0 - 1.0\n font_height = min( font_height, self.prefs['text_size'] )\n setp( self.ax.get_yticklabels(), size=font_height )\n\n self.ax.yaxis.draw( self.canvas.get_renderer() )\n\n total_xmax = 0\n for label in self.ax.get_yticklabels():\n bbox = label.get_window_extent( self.canvas.get_renderer() )\n total_xmax = max( bbox.xmax-bbox.xmin, total_xmax )\n move_left = (total_xmax+6) / self.prefs['width']\n\n pos = self.ax.get_position().get_points()\n pos[0][0] = move_left\n pos[1][0] = 1 - pos[0][0] - .02\n \n # Reset the height of the graph.\n pos[1][1] = 1 - pos[0][1] - self.prefs['figure_padding'] / \\\n float(self.prefs['height'])\n\n self.ax.set_position( [pos[0][0], pos[0][1], pos[1][0], pos[1][1]] )\n\n # Finally, call normal writer.\n super( HorizontalGraph, self ).write_graph()", "def prepare_and_draw_pgv_graph(self, output_dir=None):\n logging.info(\"prepare_pgv_graph\")\n\n if output_dir is None:\n output_folder = str(path.join(str(path.split(self.file)[0]), '../flowcharts'))\n self.logger.info('output_folder: ' + output_folder)\n try:\n mkdir(output_folder)\n self.logger.info('\"' + output_folder + '\" created.')\n except OSError as exc:\n self.logger.info('folder could not be created at first attempt: ' + output_folder)\n if exc.errno == errno.EEXIST and path.isdir(output_folder):\n self.logger.info('folder exists already: ' + output_folder)\n pass\n self.logger.exception('folder could not be created')\n else:\n output_folder = output_dir\n\n t = time.localtime()\n timestamp = time.strftime('%Y-%m-%d_%H-%M-%S', t)\n filename = timestamp + '_' + path.splitext(path.split(self.file)[1])[0]\n\n # gml output\n self.logger.info('output_gml: ' + str(path.join(output_folder, filename + '.gml')))\n nx.write_gml(self.DiGraph, path.join(output_folder, filename + '.gml'))\n\n # dot output\n\n self.logger.info('output_dot: ' + 
str(path.join(output_folder, filename + '.dot')))\n self.pgv_graph.write(path.join(output_folder, filename + '.dot'))\n\n # png output\n self.logger.info('output_png: ' + str(path.join(output_folder, filename + '.png')))\n self.draw_pgv_graph(path.join(output_folder, filename + '.png'))", "def build(self):\r\n self.dirty = 0\r\n \r\n # Files first\r\n for output in self.files.keys():\r\n params = self.files[output]\r\n if (params[1] != -1):\r\n filename = params[0]\r\n freq = params[1]\r\n if (output == 'energies'):\r\n self.myOutputs.append(OutputEnergies.OutputEnergies(filename, freq, 1,0,1.0,0))\r\n elif (output == 'dcdtrajpos'):\r\n if (os.path.exists(filename)): # Continue\r\n self.myOutputs.append(OutputDCDTrajectory.OutputDCDTrajectory(filename, freq, 1, 1))\r\n else: # Overwrite\r\n self.myOutputs.append(OutputDCDTrajectory.OutputDCDTrajectory(filename, freq, 1, 0))\r\n elif (output == 'dcdtrajvel'):\r\n if (os.path.exists(filename)):\r\n self.myOutputs.append(OutputDCDTrajectoryVel.OutputDCDTrajectoryVel(filename, freq, 1, 1))\r\n else:\r\n self.myOutputs.append(OutputDCDTrajectoryVel.OutputDCDTrajectoryVel(filename, freq, 1, 0))\r\n elif (output == 'xyztrajforce'):\r\n self.myOutputs.append(OutputXYZTrajectoryForce.OutputXYZTrajectoryForce(filename, freq))\r\n elif (output == 'xyztrajpos'):\r\n self.myOutputs.append(OutputXYZTrajectoryPos.OutputXYZTrajectoryPos(filename, freq, 1))\r\n elif (output == 'xyztrajvel'):\r\n self.myOutputs.append(OutputXYZTrajectoryVel.OutputXYZTrajectoryVel(filename, freq))\r\n elif (output == 'gui'):\r\n self.myOutputs.append(OutputFAHGUI.OutputFAHGUI(filename, freq, 52753, 1, \"MDL_3.0\", 0.0, 0))\r\n\r\n if (self.screen != -1):\r\n self.myOutputs.append(OutputScreen.OutputScreen(self.screen))\r\n\r\n\r\n # Now plots\r\n for plot in self.plots.keys():\r\n freq = self.plots[plot]\r\n if (freq != -1):\r\n\r\n # Initialize a plot\r\n if (not self.doMPL): # Gnuplot\r\n self.xyData[plot] = []\r\n self.graphs[plot] = Gnuplot(debug=0)\r\n else: # Matplotlib\r\n self.xData[plot] = []\r\n self.yData[plot] = []\r\n self.figures[plot] = 0\r\n\r\n # Add the function to plot the data,\r\n # and the frequency at which to execute it\r\n self.myPlots.append([self.plotFunctions[plot], freq])", "def rwgraph_analyze1(input=(None)):\r\n #generates graph\r\n n=2000\r\n m=4\r\n G=nx.barabasi_albert_graph(n, m, seed=5)\r\n\r\n Nt=100\r\n M=20000\r\n #finds max degree of graph and stores list of degrees of nodes\r\n maxdeg=0\r\n degree_dist=[]\r\n for i in range(0,n):\r\n degree_dist.append(G.degree[i])\r\n if G.degree[i]>maxdeg:\r\n maxdeg=G.degree[i]\r\n j=i\r\n #generates data and stores them in lists for varyin M and Nt\r\n X=rwgraph(G,j,M,Nt)\r\n Listnodes=[]\r\n for i in range(M):\r\n Listnodes.append(G.degree(X[i,Nt]))\r\n Nt=10000\r\n M=20000\r\n X=rwgraph(G,j,M,Nt)\r\n Listnodes2=[]\r\n for i in range(M):\r\n Listnodes2.append(G.degree(X[i,Nt]))\r\n Nt=10\r\n M=20000\r\n X=rwgraph(G,j,M,Nt)\r\n Listnodes3=[]\r\n for i in range(M):\r\n Listnodes3.append(G.degree(X[i,Nt]))\r\n Nt=10000\r\n M=200\r\n X=rwgraph(G,j,M,Nt)\r\n Listnodes4=[]\r\n for i in range(M):\r\n Listnodes4.append(G.degree(X[i,Nt]))\r\n fig, ax1 = plt.subplots(figsize =(14,7))\r\n\r\n ##### creates histo gram figure with 2 axis####\r\n ax1.hist([Listnodes,Listnodes2], bins=maxdeg, label=['Nt=100', 'Nt=10000'],color=['g','r'],alpha=0.6)\r\n ax1.set_xlabel('degree of node')\r\n ax1.set_ylabel('frequency of final position of random walks')\r\n\r\n ax1.tick_params(axis='y')\r\n ax2 = ax1.twinx() # 
instantiate a second axes that shares the same x-axis\r\n\r\n ax2.hist([degree_dist], bins=maxdeg, label=['graph node frequency'],color=['b'],alpha=0.6)\r\n ax2.set_ylabel('frequency of node degrees for graph')\r\n ax2.tick_params(axis='y')\r\n\r\n ax1.legend(loc=\"center right\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n ax2.legend(loc=\"upper right\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.title('M=20000, node degree of final position of random walk, for varying amounts of time', y=1.10, fontsize=20)\r\n fig.tight_layout() # otherwise the right y-label is slightly clipped\r\n plt.grid(b=None)\r\n plt.show()\r\n\r\n #function to generate diction of frequency\r\n def CountFrequency(my_list):\r\n\r\n # Creating an empty dictionary\r\n freq = {}\r\n for item in my_list:\r\n if (item in freq):\r\n freq[item] += 1\r\n else:\r\n freq[item] = 1\r\n return freq\r\n #converts data to approprate form so it can plotted on scatter plot\r\n #frequecy\r\n listfreq1=CountFrequency(Listnodes2)\r\n listfreq2=CountFrequency(Listnodes3)\r\n listfreq3=CountFrequency(Listnodes4)\r\n listfreq_deg=CountFrequency(degree_dist)\r\n #set up lists\r\n z=[]\r\n z2=[]\r\n z3=[]\r\n z_deg=[]\r\n z_deg2=[]\r\n z_deg3=[]\r\n #code to create list of only degrees used in simulations\r\n for i in listfreq1:\r\n z.append(listfreq1[i]/(listfreq_deg[i]*20000))\r\n z_deg.append(i)\r\n for i in listfreq2:\r\n z2.append(listfreq2[i]/(listfreq_deg[i]*20000))\r\n z_deg2.append(i)\r\n for i in listfreq3:\r\n z3.append(listfreq3[i]/(listfreq_deg[i]*200))\r\n z_deg3.append(i)\r\n #extpected prob distribution\r\n E=G.number_of_edges()\r\n z0=[]\r\n z_deg0=[]\r\n for i in listfreq_deg:\r\n z0.append(i/(2*E))\r\n z_deg0.append(i)\r\n #genrates scatter plot figure\r\n plt.figure(figsize=(12, 6))\r\n plt.scatter(z_deg, z, label='Nt=10000, M=20000')\r\n plt.scatter(z_deg2, z2,label='Nt=10, M=20000')\r\n plt.scatter(z_deg3, z3,label='Nt=10, M=200')\r\n plt.plot(z_deg0,z0,label=\"expected prob dist\",alpha=0.5)\r\n plt.xlabel('degree of node')\r\n plt.ylabel('frequency of final position / M*frequency of degree')\r\n plt.legend(loc=\"upper left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.title(\"Frequency of final positions relative to number of nodes of that degree, for changing times Nt and M.\")\r\n plt.show()\r\n return None #modify as needed\r", "def saveGraph (self, filename) :\n\t\tss = \"digraph {\\n\"\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\tfor rule in rules :\n\t\t\t\tr = [op.val for op in rule]\n\t\t\t\tr = [i.replace (\"-\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\".\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\"\\'\\'\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"\\\"\\\"\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"/\", \"_\") for i in r]\n\t\t\t\tk = key.replace (\"-\", \"\")\n\t\t\t\tk = k.replace (\"/\", \"_\")\n\t\t\t\tk = k.replace (\".\", \"_tok\")\n\t\t\t\tss += \"\\t\" + k + \" -> \" \n\t\t\t\tss += \" -> \".join (r)\n\t\t\t\tss += \" ;\\n\"\n\t\tss += \"}\"\n\t\tfilestream = open (filename + '.dot', 'w') \n\t\tfilestream.write(ss)\n\t\tfilestream.close ()\n\t\tcmd = 'dot -Tpng -o ' + filename + '.png ' + filename + '.dot'\n\t\tos.system (cmd)\n\t\tcmd = 'rm ' + filename + '.dot'\n\t\tos.system (cmd)", "def plot_onemitexample_R2N_hist_paperfigure(eg_netseed,eg_mitnum,resultsdir='../results/odor_morphs'):\n fig = figure(figsize=(columnwidth,columnwidth/2.0),dpi=300,facecolor='w') # 
'none' is transparent\n ax3 = fig.add_subplot(2,3,1)\n ax4 = fig.add_subplot(2,3,2)\n ax5 = fig.add_subplot(2,3,4)\n ax6 = fig.add_subplot(2,3,5)\n ax1 = fig.add_subplot(2,3,3)\n ax2 = fig.add_subplot(2,3,6)\n ## inh = (no_singles,no_joints,no_lat,no_PGs,varyRMP)\n inh_options = [ (0,(False,False,False,False,False),'lat inh') ]\n for ploti,(inhi,inh,inhstr) in enumerate(inh_options):\n R2Ns = []\n lin_R2Ns = []\n chilist = []\n n_accept = 0\n for stimi,stimseed in enumerate(stim_seeds):\n if not salient: net_seeds = [stimseed]\n for neti,netseed in enumerate(net_seeds):\n for ngi,num_gloms in enumerate([3]):\n\n filename, switch_strs \\\n = get_filename(netseed,stimseed,inh,num_gloms,stimi,neti,inhi,resultsdir=resultsdir)\n switches_str = string.join(switch_strs,'')\n ## if the result file for these seeds & tweaks doesn't exist,\n ## then carry on to the next.\n if not os.path.exists(filename): continue\n print filename\n for fitted_mitral in [0,1]:\n ## First the weighted-linear sigmoid:\n ## If the fitted params file does not exist, create it (them).\n if not os.path.exists(filename+'_params'+str(fitted_mitral)):\n print \"fitting file\",filename\n refit = True\n else: refit = False\n ## read in params & responses for this result file\n mit_fit_params = \\\n fit_om.fit_morphs(filename, fitted_mitral, 'arb', refit=refit)\n params,chisq,inputsA,inputsB,fitted_responses,\\\n numavgs,firingbinsmeanList,firingbinserrList = mit_fit_params\n S2N,S2R = forR2N.residual2noise(fitted_responses[-2],firingbinsmeanList[-2],\\\n firingbinserrList[-2]*sqrt(numavgs),starti=0) # odor A\n R2N_A = S2N/S2R\n if isnan(R2N_A): continue\n S2N,S2R = forR2N.residual2noise(fitted_responses[0],firingbinsmeanList[0],\\\n firingbinserrList[0]*sqrt(numavgs),starti=0) # odor B\n R2N_B = S2N/S2R\n if isnan(R2N_B): continue\n R2Ns.append(R2N_A)\n R2Ns.append(R2N_B)\n if netseed == eg_netseed and fitted_mitral == eg_mitnum:\n fit_om.plot_example_onemit(ax3,ax4,eg_mitnum,mit_fit_params)\n \n ## Linear-rectifier or Linear-sigmoid depending on FULLlin variable above.\n ## If the fitted params file does not exist, create it (them).\n if not os.path.exists(filename+'_params'+linextn+str(fitted_mitral)):\n print \"fitting FULLlin file\",filename\n refit = True\n else: refit = False\n ## fit/get the params and responses for this result file\n mit_fit_params = \\\n fit_om.fit_morphs(filename, fitted_mitral, 'lin', refit=refit)\n params,chisq,inputsA,inputsB,fitted_responses,\\\n numavgs,firingbinsmeanList,firingbinserrList = mit_fit_params\n S2N,S2R = forR2N.residual2noise(fitted_responses[-2],firingbinsmeanList[-2],\\\n firingbinserrList[-2]*sqrt(numavgs),starti=0) # odor A\n R2N_A = S2N/S2R\n if isnan(R2N_A): continue\n S2N,S2R = forR2N.residual2noise(fitted_responses[0],firingbinsmeanList[0],\\\n firingbinserrList[0]*sqrt(numavgs),starti=0) # odor B\n R2N_B = S2N/S2R\n if isnan(R2N_B): continue\n lin_R2Ns.append(R2N_A)\n lin_R2Ns.append(R2N_B)\n chilist.append(sqrt(chisq))\n if netseed == eg_netseed and fitted_mitral == eg_mitnum:\n fit_om.plot_example_onemit(ax5,ax6,eg_mitnum,mit_fit_params)\n\n n_accept += 1\n\n R2N_max = 1.0\n ax1.hist(clip(R2Ns,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n _,y1 = ax1.get_ylim()\n ax2.hist(clip(lin_R2Ns,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n #ax2.hist(clip(chilist,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n _,y2 = ax2.get_ylim()\n yR2Nmax = max(y1,y2)\n print \"Number of mitral cells accepted =\",n_accept\n \n ## beautify plots\n for axnum,ax in 
enumerate([ax1,ax2]):\n xmin,xmax,ymin,ymax = \\\n beautify_plot(ax,x0min=True,y0min=True,xticksposn='bottom',yticksposn='left')\n ax.set_xlim([0,R2N_max])\n ax.set_xticks([0,R2N_max])\n ax.set_ylim([0,yR2Nmax])\n ax.set_yticks([0,yR2Nmax])\n for ax in [ax1,ax3,ax4]:\n ax.set_xticklabels(['',''])\n ## axes_labels() sets sizes of tick labels too.\n axes_labels(ax1,'','prob. density',adjustpos=False,xpad=0,ypad=0)\n ax1.yaxis.set_label_coords(-0.29,-0.3)\n axes_labels(ax2,'$\\sqrt{residual/noise}$','',adjustpos=False,xpad=1,ypad=0)\n\n axes_labels(ax3,'','firing rate (Hz)',adjustpos=False,xpad=0,ypad=0)\n ax3.yaxis.set_label_coords(-0.29,-0.3)\n axes_labels(ax5,'time (s)','',adjustpos=False,xpad=3,ypad=0)\n\n axes_labels(ax4,'','fitted weight',adjustpos=False,xpad=0,ypad=0)\n ax4.yaxis.set_label_coords(-0.24,-0.3)\n axes_labels(ax6,'conc (% SV)','',adjustpos=False,xpad=3,ypad=0)\n\n fig_clip_off(fig)\n fig.tight_layout()\n fig.subplots_adjust(hspace=0.3,wspace=0.5) # has to be after tight_layout()\n fig.savefig('../figures/morphs_R2Ns.svg',dpi=fig.dpi)\n fig.savefig('../figures/morphs_R2Ns.png',dpi=fig.dpi)", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def _create_ga_plots(ga_agent, output_directory):\n\n # create trace for plot\n makespans_traces, makespans_layout = _make_ga_traces(ga_agent)\n\n # create plot\n plot(dict(data=makespans_traces, layout=makespans_layout),\n filename=str(output_directory / 'ga_makespans.html'),\n auto_open=False)\n\n # create schedule\n ga_agent.best_solution.create_schedule_xlsx_file(str(output_directory / 'ga_schedule'), continuous=True)\n ga_agent.best_solution.create_gantt_chart_html_file(str(output_directory / 'ga_gantt_chart.html'), continuous=True)", "def index():\n \n currentDateTime = current_datetime()\n fromDateTime = calc_day(currentDateTime, -3)\n\n # Adjust if any graphs should be shown in index page\n # Temperatur=XML(render_graph(3, 5, fromDateTime, currentDateTime, show_dots=False))\n # Procent_smoke=XML(render_graph(3, 6, fromDateTime, currentDateTime, show_dots=False))\n # Kitchen_Stove=XML(render_graph(2, 3, fromDateTime, currentDateTime, show_dots=False))\n # Humid=XML(render_graph(3, 4, fromDateTime, currentDateTime, show_dots=False))\n # Brightness=XML(render_graph(3, 7, fromDateTime, currentDateTime, show_dots=False))\n # Hall_motions=XML(render_graph(1, 1, fromDateTime, currentDateTime, show_dots=False, hits=True))\n # Hall_door=XML(render_graph(1, 2, fromDateTime, currentDateTime, show_dots=False, on_off=['Open', 'Close']))\n\n # return dict(test=locals())\n # return dict(test=device_monitoring)\n return dict()", "def generatePlot (self, Xdata_exp, Xdata_model, Ydata_exp, Ydata_model, Component_name):\n \n #self.clear_results_directory(results_dir)\n \n XaxisLabel = 'TCD Conversion [%]'\n YaxisLabel = 'Product Yield [wt %]'\n \n self.drawplot(XaxisLabel, YaxisLabel, Xdata_exp, Xdata_model, Ydata_exp, Ydata_model, Component_name)", "def _create_ts_plots(ts_agent_list, output_directory):\n\n # create traces for plots\n makespans_traces, makespans_layout, \\\n nh_sizes_traces, nh_sizes_layout, \\\n tl_sizes_traces, tl_sizes_layout = _make_ts_traces(ts_agent_list)\n\n # create plots\n plot(dict(data=makespans_traces, layout=makespans_layout),\n filename=str(output_directory / 
'ts_makespans.html'),\n auto_open=False)\n plot(dict(data=nh_sizes_traces, layout=nh_sizes_layout),\n filename=str(output_directory / 'neighborhood_sizes.html'),\n auto_open=False)\n plot(dict(data=tl_sizes_traces, layout=tl_sizes_layout),\n filename=str(output_directory / 'tabu_list_sizes.html'),\n auto_open=False)\n\n # create schedule\n best_solution = min([ts_agent.best_solution for ts_agent in ts_agent_list])\n best_solution.create_schedule_xlsx_file(str(output_directory / 'ts_schedule'), continuous=True)\n best_solution.create_gantt_chart_html_file(str(output_directory / 'ts_gantt_chart.html'), continuous=True)", "def print_gsyn(self, filename, gather=True):\n global controller\n timeStep = (controller.dao.machineTimeStep*1.0)/1000.0\n gsyn = self.get_gsyn(gather, compatible_output=True)\n first_id = 0\n num_neurons = self.vertex.atoms\n dimensions = self.vertex.atoms\n fileHandle = open(filename, \"w\")\n fileHandle.write(\"# first_id = %d\\n\" % first_id)\n fileHandle.write(\"# n = %d\\n\" % num_neurons)\n fileHandle.write(\"# dt = %f\\n\" % timeStep)\n fileHandle.write(\"# dimensions = [%d]\\n\" % dimensions)\n fileHandle.write(\"# last_id = %d\\n\" % num_neurons-1)\n utility_calls.check_directory_exists(filename)\n fileHandle = open(filename, \"w\")\n for (neuronId, time, value) in gsyn:\n fileHandle.write(\"%f\\t%d\\t%f\\n\" % (time, neuronId, value))\n fileHandle.close()", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def init_plots() :\n plot_dict = {}\n\n station_dict = {}\n\n for st_id in [ -5, -4, -3, -2, -1, 1, 2, 3, 4, 5 ] :\n prefix = 'station_' + str( st_id ) + '_'\n station_dict[prefix+'spacepoints_xy'] = \\\n ROOT.TH2D( prefix+'spacepoints_xy', \"Spacepoint X-Y Positions\", \\\n 1000, -200.0, 200.0, 1000, 200.0, 200.0 )\n\n plot_dict['station_plots'] = station_dict\n\n\n plot_dict['beam_positions_x'] = ROOT.TH2D( 'beam_positions_x', \\\n \"Distribution of X Positions for each station\", \\\n 11, -5.5, 5.5, 1000, -200.0, 200.0 )\n plot_dict['beam_positions_y'] = ROOT.TH2D( 'beam_positions_y', \\\n \"Distribution of Y Positions for each station\", \\\n 11, -5.5, 5.5, 1000, -200.0, 200.0 )\n plot_dict['beam_profile_x'] = None\n plot_dict['beam_profile_y'] = None\n plot_dict['beam_profile_x_up_fit'] = None\n plot_dict['beam_profile_y_up_fit'] = None\n plot_dict['beam_profile_x_down_fit'] = None\n plot_dict['beam_profile_y_down_fit'] = None\n\n plot_dict['tof_0_1'] = ROOT.TH1F( 'tof_0_1', 'Time TOF0 - TOF1', \\\n 1000, 0.0, 100.0 )\n plot_dict['tof_1_2'] = ROOT.TH1F( 'tof_1_2', 'Time TOF1 - TOF2', \\\n 1000, 0.0, 100.0 )\n plot_dict['tof_0_1_cut'] = ROOT.TH1F( 'tof_0_1_cut', 'Time TOF0 - TOF1', \\\n 1000, 0.0, 100.0 )\n plot_dict['tof_1_2_cut'] = ROOT.TH1F( 'tof_1_2_cut', 'Time TOF1 - TOF2', \\\n 1000, 0.0, 100.0 )\n\n return plot_dict", "def viz_graph(self, show_ports=False, pydot_options=None):\n import networkx as nx\n G = nx.DiGraph()\n if pydot_options:\n G.graph['graph'] = pydot_options\n # instantiate objects\n for itask in self:\n task_inputs = itask[TaskSpecSchema.inputs]\n to_task = itask[TaskSpecSchema.task_id]\n to_type = itask[TaskSpecSchema.node_type]\n if to_task == \"\":\n to_task = OUTPUT_TYPE\n for iport_or_tid in task_inputs:\n # iport_or_tid: it is either to_port or task id (tid) b/c\n # if using ports API task_inputs is a dictionary otherwise\n # task_inputs is a list.\n taskin_and_oport = task_inputs[iport_or_tid] \\\n if isinstance(task_inputs, dict) else iport_or_tid\n isplit = taskin_and_oport.split('.')\n from_task = 
isplit[0]\n from_port = isplit[1] if len(isplit) > 1 else None\n if show_ports and from_port is not None:\n to_port = iport_or_tid\n common_tip = taskin_and_oport\n G.add_edge(from_task, common_tip, label=from_port)\n G.add_edge(common_tip, to_task, label=to_port)\n tnode = G.nodes[common_tip]\n tnode.update({\n # 'label': '',\n 'shape': 'point'})\n else:\n G.add_edge(from_task, to_task)\n\n # draw output ports\n if show_ports:\n\n if (to_type == OUTPUT_TYPE):\n continue\n task_node = get_node_obj(itask, tgraph_mixin=True)\n # task_outputs = itask.get(TaskSpecSchema.outputs, [])\n for pout in task_node._get_output_ports():\n out_tip = '{}.{}'.format(\n itask[TaskSpecSchema.task_id], pout)\n G.add_edge(to_task, out_tip, label=pout)\n tnode = G.nodes[out_tip]\n tnode.update({\n # 'label': '',\n 'shape': 'point'})\n return G", "def create_plots(self):\n shutil.rmtree(self.param.path, ignore_errors=True)\n os.makedirs(self.param.path)\n\n ids = list(range(len(self.signs)))\n\n \"\"\"True positives\"\"\"\n values, kinds = self.get_evaluations(ids)\n plots.create_plot(\n kinds,\n [e[0] for e in values], # True positives\n save_dir=self.param.path,\n y_label=\"number_tp\",\n file_name=\"number_tp\",\n title=\"Amount of true positives\",\n )\n\n # Only signs with at least one detection!\n ids = [i for i, _ in enumerate(self.signs) if self.signs[i].evaluate()[2] > 0]\n values, kinds = self.get_evaluations(ids)\n\n \"\"\"Distance\"\"\"\n plots.create_plot(\n kinds,\n values=[e[2] for e in values], # Distances\n save_dir=self.param.path,\n y_label=\"distance\",\n file_name=\"distance\",\n title=\"Distance\",\n )\n \"\"\"Precision\"\"\"\n plots.create_plot(\n kinds,\n # Precision signs with at least one detection are used, e[0]+e[1] > 0)\n values=[e[0] / (e[0] + e[1]) for e in values],\n save_dir=self.param.path,\n y_label=\"precision\",\n file_name=\"precision\",\n title=\"Precision\",\n )", "def list_spectrographs(self) -> None:\n for key, item in self.spectrographs.items():\n item.summary()\n print(\"\\n\")", "def disp_graph(graph, output_filename):\n dot = Graph(name=\"Graph\", format=\"png\") # instantiate a graph object\n for node in graph.keys(): # add nodes to the graph\n dot.node(str(node))\n for node in graph.keys(): # for every node in the input graph\n # for every other node in the input graph that the first node is connected to\n for other_node in graph[node].keys():\n dot.edge(str(node), str(other_node)) # create the edge\n dot.render(output_filename, view=True) # visualize the graph and save it", "def show_visualizations(self, number = -1):\n instance = self.instance\n if number > instance.view_num:\n print(\"In function show_visualizations: Error, input number greater than the view numbers.\")\n return Page()\n if self.rank_method == methods_of_ranking[3]: # diversified_ranking\n G = myGraph(instance.view_num)\n for i in range(instance.view_num):\n view = instance.tables[instance.views[i].table_pos].views[instance.views[i].view_pos]\n G.addNode(view)\n G.getSim()\n result = G.getTopK(instance.view_num)\n if number != -1:\n begin = number - 1\n end = number\n else:\n begin = 0\n end = instance.view_num\n page = Page()\n for order in range(begin, end):\n if self.rank_method == methods_of_ranking[3]: # diversified_ranking\n view = G.nodes[result[order]]\n else:\n view = instance.tables[instance.views[order].table_pos].views[instance.views[order].view_pos]\n data = {}\n data['order'] = order\n data['chartname'] = instance.table_name\n data['describe'] = view.table.describe\n data['x_name'] 
= view.fx.name\n data['y_name'] = view.fy.name\n data['chart'] = Chart.chart[view.chart]\n data['classify'] = [v[0] for v in view.table.classes]\n data['x_data'] = view.X\n data['y_data'] = view.Y\n data['title_top'] = 5\n \n # 以下代码与html_handle相似\n margin = str(data['title_top']) + '%'\n \n if data['chart'] == 'bar':\n chart = (Bar().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(name=data['x_name']),\n yaxis_opts=opts.AxisOpts(name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n elif data['chart'] == 'pie': \n chart = (Pie().set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin)))\n elif data['chart'] == 'line': \n chart = (Line().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(name=data['x_name']),\n yaxis_opts=opts.AxisOpts(name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n elif data['chart']== 'scatter': \n chart = (Scatter().set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(title_opts=opts.TitleOpts(title=data['chartname'], subtitle=data['describe'], pos_left='center', pos_top=margin),\n xaxis_opts=opts.AxisOpts(type_='value', name=data['x_name'], splitline_opts=opts.SplitLineOpts(is_show=True)),\n yaxis_opts=opts.AxisOpts(type_='value', name=data['y_name'], splitline_opts=opts.SplitLineOpts(is_show=True))))\n else :\n print (\"not valid chart\")\n \n if not data[\"classify\"] :\n attr = data[\"x_data\"][0]\n val = data[\"y_data\"][0]\n if data['chart'] == 'bar': \n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'line': \n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'pie': \n chart.add(\"\", [list(z) for z in zip(attr, val)])\n elif data['chart'] == 'scatter': \n if isinstance(attr[0], str):\n attr = [x for x in attr if x != '']\n attr = list(map(float, attr))\n if isinstance(val[0], str):\n val = [x for x in val if x != '']\n val = list(map(float, val))\n chart.add_xaxis(attr).add_yaxis(\"\", val, label_opts=opts.LabelOpts(is_show=False))\n page.add(chart)\n else :\n attr = data[\"x_data\"][0]\n for i in range(len(data[\"classify\"])) :\n val = data[\"y_data\"][i]\n name = (data[\"classify\"][i][0] if type(data[\"classify\"][i]) == type(('a','b')) else data[\"classify\"][i])\n if i == 0:\n if data['chart'] != 'pie' and data['chart'] != 'scatter':\n chart.add_xaxis(attr)\n if data['chart'] == 'bar': \n chart.add_yaxis(name, val, stack=\"stack1\", label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'line': \n chart.add_yaxis(name, val, label_opts=opts.LabelOpts(is_show=False))\n elif data['chart'] == 'pie': \n chart.add(\"\", [list(z) for z in zip(attr, val)])\n elif data['chart'] == 'scatter': \n attr_scatter = data[\"x_data\"][i]\n if isinstance(attr_scatter[0], str):\n attr_scatter = [x for x in attr_scatter if x != '']\n attr_scatter = list(map(float, attr_scatter))\n if isinstance(val[0], str):\n val = [x for x in val if x != '']\n val = list(map(float, val))\n chart.add_xaxis(attr_scatter).add_yaxis(name, val, label_opts=opts.LabelOpts(is_show=False))\n 
page.add(chart)\n return page", "def export(self):\n if self.model.algorithm == 'DecisionTree':\n dot_data = tree.export_graphviz(self.model.clf, out_file=None)\n graph = graphviz.Source(dot_data)\n graph.render(\"exports/DecisionTreeRegressor\")", "def graph(self):\n ...", "def dump_graph(self):\n # TODO\n return", "def visualize(self):\n print('{0} is {1} time steps old'.format(self.name, self.timestep))\n\n self.amygdala.visualize(self.timestep, self.name, self.log_dir)\n self.cerebellum.visualize(self.name, self.log_dir)\n self.cingulate.visualize(self.name, self.log_dir)\n self.hippocampus.visualize(self.name, self.log_dir)\n #self.ganglia.visualize(self.name, self.log_dir)\n #self.cortex.visualize(self.name, self.log_dir)", "def write_plot(self):\n with open(self._graph_data_path, \"w+\") as f:\n run_time = self.start_time\n f.write(\"Time, Temperature\\n\")\n temperature = 0\n for step in self.profile[\"steps\"]:\n keys = list(step)\n if len(keys) > 0:\n if keys[0] == \"start\":\n temperature = step[\"start\"]\n if keys[0] == \"rest\":\n run_time += timedelta(minutes = step[\"rest\"])\n if keys[0] == \"ramp\":\n run_time += timedelta(minutes = step[\"ramp\"])\n temperature = step[\"to\"]\n if keys[0] == \"mashout\":\n temperature = step[\"mashout\"]\n time = run_time.strftime(\"%H:%M:%S, \")\n f.write(time + str(temperature) + \"\\n\")\n run_time += timedelta(minutes = 10)\n if keys[0] == \"jump\":\n temperature = step[\"jump\"]\n\n time = run_time.strftime(\"%H:%M:%S, \")\n f.write(time + str(temperature) + \"\\n\")\n else:\n logger.error(\"Can't make sense of \" + str(step))", "def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def doAllPlots ():\n #df = processIp (\"18-06-01-1-attack.pcap\", \"ec:1a:59:79:f4:89\")\n #df.to_csv (\"df.csv\", index=False)\n df = pd.read_csv (\"df.csv\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropyWithThreshold (df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n \"\"\"\n Traffic flow graph\n \"\"\"\n #df = processTrafficFlow (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotTrafficFlow (df)\n\n \"\"\"\n Entropy for source port\n \"\"\"\n #df = processSrcPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df)\n\n \"\"\"\n Entropy for destination port\n \"\"\" \n #df = 
processDstPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df) \n\n \"\"\"\n It will be implemented next day\n df = processPorts (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n df = processProtocols (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n \"\"\"\n return", "def main():\n filenames = sys.argv[1]\n fdir = sys.argv[2]\n filenames = filenames.split(',')\n\n # print (filenames)\n graph = PGraph(fdir, filenames, \"Multi-Source Foraging\")\n # graph = PGraph(fdir, filenames, \"Cooperative Transport\")\n # graph = PGraph(fdir, filenames, \"Nest Maintenance\")\n # graph = PGraph(\n # fdir, filenames, \"Nest Maintenance \\n with \\n Handcoded behaviors\")\n graph.gen_plot()\n\n # box = BoxGraph(fdir, filenames, \"Single-Source Foraging\")\n # box = BoxGraph(fdir, filenames, False, (-1, 100), \"Multi-Source Foraging\")\n box = BoxGraph(fdir, filenames, False, (-1, 120), \"Nest Maintenance with Handcoded behaviors\")\n # box = BoxGraph(\n # fdir, filenames, \"Nest Maintenance \\n with \\n Handcoded behaviors\")\n box.gen_plot()", "def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot", "def test_plot_graphs(self):\n\n # Graphs who are not embedded, i.e., have no coordinates.\n COORDS_NO = {\n 'Graph',\n 'BarabasiAlbert',\n 'ErdosRenyi',\n 'FullConnected',\n 'RandomRegular',\n 'StochasticBlockModel',\n }\n\n # Coordinates are not in 2D or 3D.\n COORDS_WRONG_DIM = {'ImgPatches'}\n\n Gs = []\n for classname in set(graphs.__all__) - COORDS_NO - COORDS_WRONG_DIM:\n Graph = getattr(graphs, classname)\n\n # Classes who require parameters.\n if classname == 'NNGraph':\n Xin = 
np.arange(90).reshape(30, 3)\n Gs.append(Graph(Xin))\n elif classname in ['ImgPatches', 'Grid2dImgPatches']:\n Gs.append(Graph(img=self._img, patch_shape=(3, 3)))\n elif classname == 'LineGraph':\n Gs.append(Graph(graphs.Sensor(20, seed=42)))\n else:\n Gs.append(Graph())\n\n # Add more test cases.\n if classname == 'TwoMoons':\n Gs.append(Graph(moontype='standard'))\n Gs.append(Graph(moontype='synthesized'))\n elif classname == 'Cube':\n Gs.append(Graph(nb_dim=2))\n Gs.append(Graph(nb_dim=3))\n elif classname == 'DavidSensorNet':\n Gs.append(Graph(N=64))\n Gs.append(Graph(N=500))\n Gs.append(Graph(N=128))\n\n for G in Gs:\n self.assertTrue(hasattr(G, 'coords'))\n self.assertEqual(G.N, G.coords.shape[0])\n\n signal = np.arange(G.N) + 0.3\n\n G.plot(backend='pyqtgraph')\n G.plot(backend='matplotlib')\n G.plot(signal, backend='pyqtgraph')\n G.plot(signal, backend='matplotlib')\n plotting.close_all()", "def generate(self):\n self.generate_points()\n self.generate_edges()", "def main():\n rows = []\n for path in DATA.glob(\"*.tsv\"):\n with path.open() as file:\n _header = next(file)\n for line in file:\n dead_id, when, alt_id = line.strip(\"\\n\").split(\"\\t\")\n rows.append((path.stem, dead_id, when, alt_id))\n\n rows = sorted(rows)\n\n with OUTPUT_PATH.open(\"w\") as file:\n print(*HEADER, sep=\"\\t\", file=file)\n for row in rows:\n print(*row, sep=\"\\t\", file=file)\n\n df = pd.DataFrame(rows, columns=[\"prefix\", \"dead_id\", \"date\", \"alternative_id\"])\n fig, ax = plt.subplots(figsize=(6, 3))\n sns.histplot(data=df, y=\"prefix\", ax=ax)\n ax.set_ylabel(\"\")\n ax.set_xscale(\"log\")\n ax.set_xlabel(\"Dead Identifiers\")\n fig.tight_layout()\n fig.savefig(SUMMARY_SVG_PATH)", "def draw_all(self, file_name=None):\n infos = zip(self.mutations, self.scores, self.seq)\n tuples = sorted(\n [(score, i, nt, alternative)\n for i, (alternative, score, nt) in enumerate(infos)])\n tuples = tuples[:self.k]\n graphs = []\n for score, position, nt, alternative in tuples:\n header = '%s %d %s' % (nt, position, alternative)\n alt_seq = _replace(self.seq, position, alternative)\n _graphs = self.fold([(header, alt_seq)])\n graph = _graphs.next()\n graphs.append(graph)\n\n opts = {'size': 10, 'font_size': 9, 'colormap': 'rainbow',\n 'vertex_border': False, 'vertex_size': 200,\n 'vertex_alpha': 0.4, 'vertex_color': '_label_',\n 'edge_alpha': 0.2, 'edge_label': None,\n 'dark_edge_alpha': 0.8,\n 'ignore_for_layout': 'nesting', 'layout': 'KK',\n 'n_graphs_per_line': 3}\n draw_graph_set(graphs, file_name=file_name, **opts)" ]
[ "0.6862232", "0.6708507", "0.66322786", "0.63818264", "0.624256", "0.6234403", "0.6228745", "0.62106884", "0.6164413", "0.61330986", "0.61289656", "0.6121739", "0.6121739", "0.6121739", "0.61114043", "0.60845", "0.60834754", "0.6047425", "0.6043085", "0.6017747", "0.60091543", "0.6001606", "0.598036", "0.5971068", "0.5923794", "0.590091", "0.5899152", "0.5897972", "0.58952016", "0.58947164", "0.5867381", "0.586235", "0.5858162", "0.5857776", "0.5847163", "0.58098376", "0.58078635", "0.58050436", "0.5796381", "0.57915366", "0.5778048", "0.57634985", "0.5760535", "0.5759099", "0.5733863", "0.57335895", "0.57284486", "0.5723543", "0.5716477", "0.57101923", "0.570756", "0.5700024", "0.56936353", "0.5681121", "0.5681033", "0.56777006", "0.56742233", "0.56720674", "0.56685305", "0.56618744", "0.56586015", "0.5652264", "0.564258", "0.5627759", "0.56236", "0.56144875", "0.5612039", "0.5602429", "0.5597748", "0.5596523", "0.5593313", "0.55904514", "0.5579637", "0.5579382", "0.55788183", "0.55786467", "0.557639", "0.55760825", "0.55759764", "0.55721617", "0.55487853", "0.5543973", "0.5541074", "0.5535882", "0.55324626", "0.5530633", "0.55277944", "0.5526248", "0.5524643", "0.55217695", "0.55216074", "0.5521287", "0.5515247", "0.5515067", "0.55132276", "0.5509382", "0.55091864", "0.55068374", "0.5502692", "0.5501537", "0.5498811" ]
0.0
-1
Getter for the reference (initial board state) biosafe output.
def get_reference(self):
    return self.PotTax_reference
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bribe(self):\r\n return self.bribe", "def state(self):\r\n return str(self)", "def output(self):\n return ''.join([state[1] for state in self.condensed_output_states])", "def get_reference(self):\t\t\n\t\treturn self._reference", "def state(self) -> str:", "def read_acbr(self):\n return self.ACBR", "def getOutput(self):\r\n return self._output", "def state(self):\n\n\t\treturn str(self)", "def state(self):\n return str(self)", "def b(self):\r\n return self.__b", "def get_output(self):\n return self.output", "def get_output(self):\n return self.output", "def get_output(self):\r\n return self.on", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def out(self):\n return self._out.getvalue()", "def getB(self):\n\t\treturn self.b", "def Get_Reference_Value(self):\r\n return self.__readFromRegister(self.__REG_RW_REFERENCE, 0xff)", "def output(self):\n return self.__output", "def get_bolsa(self):\n return self.bolsa", "def output(self):\r\n return self._output", "def sbom_reference(self) -> pulumi.Output['outputs.SBOMReferenceNoteResponse']:\n return pulumi.get(self, \"sbom_reference\")", "def out(self):\n return self.__out", "def SB ( self ) :\n return self.__sb", "def SB ( self ) :\n return self.__sb", "def output(self):\n return self._output", "def output(self):\n return self._output", "def output(self):\n return self._output", "def get_full_output(self):\n if self.full_output:\n return self.full_output", "def get_output(self):\n return self._output", "def store_biosafe_output(self, data, reference=False, percentage=False):\n if percentage:\n self.biosafe_percentage = data\n elif reference:\n self.biosafe_reference = data\n else:\n self.biosafe_intervention = data\n return", "def full_output_state(self):\n outcomes = self.fock_basis()\n return self.calculate_state_amplitudes(outcomes, reduce_state=False)", "def b(self):\n return self._b", "def b(self):\n return self._b", "def b(self):\n return self._b", "def b(self):\n return self._b", "def __repr__(self):\n s = self.print_bfs()\n return s", "def get_b(self):\n return self._b", "def getvalue(self):\n return self.out.getvalue()", "def snapshot(self):\n text = \"\"\n text += \"{}:\\n{}\\n\".format('chi', np.array2string(self.chi))\n return text", "def get_classical_output(self):\n # If the input is a pattern, return the bit string equivalent to the result in circuit model\n if self.__circuit is not None:\n c_out, q_out = self.__bw_pattern.output_\n\n # Obtain the string\n # Mark the classical outputs with their measurement outcomes and mark quantum outputs with '?'\n vertex_list = [str(self.__client_knowledge[(i, self.__depth - 1)].get_outcome())\n if (i, self.__depth - 1) in c_out else '?'\n for i in range(self.__width)]\n\n bit_str = ''.join(vertex_list)\n return bit_str\n\n # If the input is a graph, return the whole dictionary as the output\n else:\n bit_dict = {pos: self.__client_knowledge[pos].get_outcome()\n for pos in self.__client_knowledge.keys()}\n return bit_dict", "def __repr__(self):\r\n r = str(self.current_instance_state())\r\n return r", "def get_state_display(self, obj):\n return obj.get_state_display()", "def BS ( self ) :\n return self.SB", "def output(self) -> Optional[str]:\n return self.__output", "def __str__(self):\n b = ''\n for i in range(7): # 7 steps in the board\n 
if i == self.chaser_i: # chaser position\n b += '|' + str(i) + '| chaser |\\n'\n elif i == self.player_i: # player position\n b += '|' + str(i) + '| player |\\n'\n else:\n b += '|' + str(i) + '| |\\n'\n b += '|7| bank |\\n' # bank position\n return b", "def get_output(self, **kwargs):\n return self.out", "def __repr__(self):\n return f\"{self.deck}\"", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def ref(self) -> str:\n return self._ref", "def ref(self):\n return self._ref", "def get_output(self):\n return None, None", "def get_output(self):\n return None, None", "def output(self):\n pdb.set_trace()\n return \"\".join(self.pieces)", "def acc_b(self):\n return self._acc_b", "def reference(self):\n \n return self._reference", "def return_output(self):\n return self.output", "def reference(self):\n return self._reference", "def reference(self):\n return self._reference", "def ref(self):\n\t\treturn self.bottle.ref", "def __pout__(self):\n return self.__str__()", "def output(self, state):\n h, t = state\n\n return h", "def dump(self):\n return self.output", "def get_sequence_output(self):\n return self.sequence_output", "def __repr__(self):\r\n return self.value", "def get_output(self):\n raise NotImplementedError", "def state_read_out(self):\n\n s_ro = utils.pow_read_out(self.s['both'], self.ro_pow)\n return s_ro", "def Current(self) -> str:", "def val(self):\n return self.output", "def val(self):\n return self.output", "def pdbout(self):\n return self._pdbout", "def nice_output(self):\n return self.des", "def nice_output(self):\n return self.des", "def print(self):\n nfa = self.aut_stack.pop()\n return nfa", "def get_flashcard(self):\n return f'{self.word}; \"{self.definition}\"'", "def get_flashcard(self):\n return f'{self.word}; \"{self.definition}\"'", "def getBanter(self):\r\n return self.__banterAssembler.assembleString()", "def showref_output(self, *arguments, **kwargs):\n return self.get_output('show-ref', *arguments, **kwargs)", "def __repr__(self):\n return f'Board({ self.board !r})'", "def Value(self) -> str:", "def __repr__(self) -> str:\n return f\"cf({self.default_value!r})\"", "def output(self):\n\t\treturn \"\".join(self.pieces)", "def output(self):\r\n return self.result", "def get_display_value(self):\n\n\t\treturn self.__display_value", "def __repr__(self):\n return self._hex", "def print_state(self):\n raise AIError(\"Must be implemented in child class!\")" ]
[ "0.6169057", "0.6052161", "0.60329854", "0.6009293", "0.5998664", "0.5996888", "0.5995191", "0.59844786", "0.59398574", "0.58981454", "0.58868086", "0.58868086", "0.58711606", "0.58579654", "0.58579654", "0.58579654", "0.58579654", "0.58579654", "0.58579654", "0.58522797", "0.58461803", "0.5831309", "0.5812926", "0.5803668", "0.57995886", "0.5789187", "0.57758373", "0.5749201", "0.5749201", "0.5743462", "0.5743462", "0.5743462", "0.5724384", "0.57169044", "0.5708871", "0.5692609", "0.56916326", "0.56916326", "0.56916326", "0.56916326", "0.56850106", "0.5665592", "0.56627387", "0.5644076", "0.5637859", "0.56223494", "0.5606107", "0.55960554", "0.55760974", "0.55738944", "0.55737776", "0.556469", "0.555768", "0.555768", "0.555768", "0.555768", "0.555768", "0.555768", "0.555768", "0.555768", "0.555768", "0.555768", "0.555768", "0.555768", "0.55496347", "0.5546525", "0.55434334", "0.55434334", "0.553697", "0.55353904", "0.5523812", "0.551443", "0.5512992", "0.5512992", "0.55121976", "0.55121285", "0.5506174", "0.5503496", "0.5488081", "0.54854995", "0.54829323", "0.54807824", "0.54744047", "0.54689926", "0.54689926", "0.5449867", "0.5449048", "0.5449048", "0.5441603", "0.5436411", "0.5436411", "0.543425", "0.5427698", "0.5424538", "0.5422906", "0.54222196", "0.5407708", "0.53881425", "0.5388127", "0.53795", "0.5378422" ]
0.0
-1
Getter for the current intervention (board state) biosafe output.
def get_intervention(self):
    return self.PotTax_intervention
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_output(self):\r\n return self.on", "def current_state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"current_state\")", "def getAI(self):\n device = self.reducetoolbar.detectorcombobox.currentText()\n ai = self.calibrationsettings.AI(device)\n return ai", "def get_current_preset(self):\n if self._legacy_anna:\n active_rule = self._domain_objects.find(\"rule[active='true']/directives/when/then\")\n if active_rule is None or \"icon\" not in active_rule.keys():\n return \"none\"\n return active_rule.attrib[\"icon\"]\n\n log_type = \"preset_state\"\n locator = (\n \"appliance[type='thermostat']/logs/point_log[type='\"\n + log_type\n + \"']/period/measurement\"\n )\n return self._domain_objects.find(locator).text", "def ionization(self):\n return self._ionization", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")", "def get_ibat(self):\n return self.read_register(4097, 1, 3)", "def get_state(self):\r\n return self.currentObservation", "def output(self):\n return ''.join([state[1] for state in self.condensed_output_states])", "def get_output(self):\n return self.output", "def get_output(self):\n return self.output", "def get_AIA(self):\n\n return self.get_POW().getAIA()", "def detail_state(self) -> str:\n if isinstance(self.wemo, CoffeeMaker):\n return self.wemo.mode_string\n if isinstance(self.wemo, Insight):\n standby_state = self.wemo.standby_state\n if standby_state == StandbyState.ON:\n return STATE_ON\n if standby_state == StandbyState.OFF:\n return STATE_OFF\n if standby_state == StandbyState.STANDBY:\n return STATE_STANDBY\n return STATE_UNKNOWN\n # Unreachable code statement.\n raise RuntimeError", "def state(self):\n return self.coordinator.data[INVERTER_DEVICE_TYPE][self.base_unique_id][INVERTER_STATE]", "def read_actual_current(self):\n function_string = 'I' + self.output + 'O?'\n value_string = self.scpi_comm(function_string)\n time.sleep(0.1) # This might only be necessary on LAN interface\n try:\n value = float(value_string.replace('A', ''))\n except ValueError:\n value = -9998\n return value", "def get_bribe(self):\r\n return self.bribe", "def ion(self):\n return self._ion", "def get_output(self):\n return self._output", "def state_info(self) -> pulumi.Output[Mapping[str, Any]]:\n return pulumi.get(self, \"state_info\")", "def state(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"state\")", "def state(self):\n\n\t\treturn str(self)", "def state(self):\n return self.roller.battery", "def state(self):\r\n return str(self)", "def state(self):\n return str(self)", "def get_pump_state(self):\n return self.__sensor_states[2]", "def getOutput(self):\r\n return 
self._output", "def get_display_info(self):\n return self.display_info", "def info(self):\r\n\r\n return self.sim_info", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def get_SIA(self):\n\n return self.get_POW().getSIA()", "def get_output(self):\r\n return self._api.get_output()", "def get_bio_gripper_status(self):\r\n return self._arm.get_bio_gripper_status()", "def getObservation(self):\n return self._cur_state", "def get_state(self) -> Dict[str, Any]:\n log.debug('get_state: {}'.format(self.bonsai_state))\n return self.bonsai_state", "def state(self) -> str:", "def output(self):\r\n return self._output", "def output(self):\n return self._output", "def output(self):\n return self._output", "def output(self):\n return self._output", "def output(self):\n return self.__output", "def get_value(cls, output: CommonConsoleCommandOutput, *args: str) -> SimInfo:\n from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils\n sim_info = super().get_value(output, *args)\n from singletons import UNSET\n if sim_info is UNSET:\n return CommonSimUtils.get_active_sim_info()\n return sim_info", "def get_info(self) -> str:\n return self.info", "def current_state(self):\n return self.obs_hook(self._current_obs)", "def get_interact_value(self):\n return self.value", "def get_state_display(self, obj):\n return obj.get_state_display()", "def egu(self):\n return self.motor_egu.get()", "def info(self) -> str:\n return pulumi.get(self, \"info\")", "def info(self):\n\n if self.running:\n return INFO_RUNNING_FORMAT.format(**self.__dict__)\n else:\n return INFO_ENDED_FORMAT.format(**self.__dict__)", "def info(self) -> str:\n return self._info", "def info(self) -> str:\n return self._info", "def state(self):\n return self.probe.get_data(self.variable)", "def state(self):\n return self.probe.get_data(self.variable)", "def get_classical_output(self):\n # If the input is a pattern, return the bit string equivalent to the result in circuit model\n if self.__circuit is not None:\n c_out, q_out = self.__bw_pattern.output_\n\n # Obtain the string\n # Mark the classical outputs with their measurement outcomes and mark quantum outputs with '?'\n vertex_list = [str(self.__client_knowledge[(i, self.__depth - 1)].get_outcome())\n if (i, self.__depth - 1) in c_out else '?'\n for i in range(self.__width)]\n\n bit_str = ''.join(vertex_list)\n return bit_str\n\n # If the input is a graph, return the whole dictionary as the output\n else:\n bit_dict = {pos: self.__client_knowledge[pos].get_outcome()\n for pos in self.__client_knowledge.keys()}\n return bit_dict", "def get_in_act(self):\n in_act = self.act.get_in_act()\n in_act += ' This backfires and negatively affects {performer}.'\n return in_act", "def state(self):\n if self.coordinator.data:\n return self.coordinator.data[self._sensor]", "def get_output(self):\n raise NotImplementedError", "def current_option(self) -> str | None:\n # If the translation key is \"zone_sleep\", we need to translate\n # the value to make it compatible with Home Assistant\n if (\n value := self.capability.current\n ) is not None and self.translation_key == \"zone_sleep\":\n return ZONE_SLEEP_STATE_MAPPING[value]\n\n return value", "def silly(self) -> str:\n print(f\"Getting {self._name}'s State\")\n return self._state", "def 
output_to_gui(self):\n\n return {\n \"case\": self.case_info,\n \"tick\": self.tick,\n \"period\": self.period,\n \"trader\": self.trader,\n \"limits\": self.limits,\n \"orderbook\": self.orderbook,\n \"securities\": self.securities,\n \"CRZY_candle\": self.CRZY_candle,\n \"TAME_candle\": self.TAME_candle,\n \"candle_length\": self.candle_length,\n # Tender Specific\n \"tenders\": self.__tenders_to_gui(),\n }", "def info(self) -> Optional[Dict[str, Any]]:\n return self._state.get(\"info\", None)", "def output(self) -> Optional[str]:\n return self.__output", "def read_acbr(self):\n return self.ACBR", "def nice_output(self):\n return self.des", "def nice_output(self):\n return self.des", "def get_state(self):\n return self.controller.get_state()", "def get(self):\n if self.mode == gpio.IN:\n self.state = gpio.input(self.bcm_id)\n\n return self.state", "def info(self):\n return self.nfo", "def getBanter(self):\r\n return self.__banterAssembler.assembleString()", "def _get_obs(self) -> np.ndarray:\n if self._obs_type == \"ram\":\n return self.ale.getRAM()\n elif self._obs_type == \"rgb\":\n return self.ale.getScreenRGB()\n elif self._obs_type == \"grayscale\":\n return self.ale.getScreenGrayscale()\n else:\n raise Error(f\"Unrecognized observation type: {self._obs_type}\")", "def get_icc_off(self):\n return self.icc", "def getCurrentCalibration(self): \n return getI1DisplayCurrentCalibration(self._calibration_list)", "def state(self):\n return self._current_value", "def get_console_output(self):\r\n return self.connection.get_console_output(self.id)", "def get_output(self):\r\n _debug('simq03b_api.get_output')\r\n \r\n x = self.query('OUTP:STAT?')\r\n if x == None: return None\r\n print('Result is ', x) # For knowing the bug that we something have\r\n return int(x)", "def __repr__(self):\r\n r = str(self.current_instance_state())\r\n return r", "def out(self):\n return self._out.getvalue()", "def info(self):\n return self.current_run.info", "def get_current_state(self):\n return self.game.get_current_state()", "def current_operation(self):\n return GH_STATE_TO_HA[self._boiler.mode]", "def accessor(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"accessor\")", "def get_full_output(self):\n if self.full_output:\n return self.full_output", "def get(self):\n return self.stdout.get()", "def get_display_value(self):\n\n\t\treturn self.__display_value", "def full_output_state(self):\n outcomes = self.fock_basis()\n return self.calculate_state_amplitudes(outcomes, reduce_state=False)" ]
[ "0.6088682", "0.5991196", "0.59810793", "0.59794337", "0.59147507", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.58739257", "0.5803603", "0.57730144", "0.57642055", "0.57642055", "0.57426196", "0.57246584", "0.5716621", "0.57114875", "0.57022125", "0.5688254", "0.56559664", "0.56465644", "0.56451297", "0.5643096", "0.5621241", "0.56193763", "0.56132036", "0.56125736", "0.56010497", "0.559031", "0.55842716", "0.5577052", "0.5577052", "0.5577052", "0.5577052", "0.5577052", "0.5577052", "0.55695546", "0.553585", "0.5529869", "0.552794", "0.5526862", "0.55212945", "0.5520812", "0.5516888", "0.5516888", "0.5516888", "0.5515895", "0.5509064", "0.550292", "0.54984933", "0.54974514", "0.54921234", "0.5489885", "0.54571277", "0.54521143", "0.54517233", "0.54517233", "0.5439109", "0.5439109", "0.54356915", "0.54206115", "0.5419159", "0.54190326", "0.54190296", "0.54183716", "0.5417843", "0.53998923", "0.53971034", "0.53894156", "0.53786606", "0.53786606", "0.53553826", "0.533981", "0.5332247", "0.53316635", "0.53278464", "0.5325549", "0.53248924", "0.53057075", "0.53039885", "0.5301894", "0.5299811", "0.52990705", "0.5297563", "0.5295744", "0.52921164", "0.5289992", "0.52848023", "0.5282449", "0.52817315", "0.5281129" ]
0.56280655
32
Getter for the percentage difference.
def get_percentage(self):
    return self.PotTax_percentage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def get_percent(self):\n return self.percent", "def pct(self):\n\t\treturn self.bottle.pct()", "def percent(self):\r\n return self._percent", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def calculatePercentChange(self, oldValue, newValue):\n return (((newValue - oldValue)/oldValue)*100)", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def get_diff_and_percentage(self, first, second, state):\n difference = first - second\n per_difference = (difference / second) * 100\n total_percentage = (first / self.populations[state]) * 100\n return [difference, per_difference, total_percentage]", "def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def total_differences(self):\n return self._total_diffs", "def diff(self, content):\n\n self.differ.set_seq2(self.make_hash_sequence(content))\n percent_diff = (1.0 - self.differ.ratio()) * 100.0\n percent_diff = 1 if 0 < percent_diff < 1 else int(round(percent_diff, 0))\n\n if percent_diff != 0 and len(content) < self.expected_length:\n percent_diff *= -1\n\n return percent_diff", "def height_percent(self):\n return self.container['height_percent']", "def percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"percentage\")", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def __get_change(self, current, previous):\n current = float(current)\n previous = float(previous)\n if current == previous:\n return 0\n try:\n r = (abs(current - previous) / previous) * 100\n if r > 100:\n r = 100\n return round(r)\n except ZeroDivisionError:\n return 100", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n return (curPrice - closePrice)/closePrice * 100", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def remaining_percent(self):\n return (self.remaining_words / self.total_words) * 100", "def ratio(self):\n try:\n return self.fields['uploadedEver'] / float(self.fields['downloadedEver'])\n except ZeroDivisionError:\n return 0.0", "def percent_b(self) -> float:\n return self._percent_b", "def pct_change(self):\n return self.close.pct_change()", "def price_diff_rel(self): \n try:\n return(self.price_diff / self.price_open)\n except:\n return", "def percent_left(self):\n return 100 - self.percent_complete", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def price_diff_rel_d(self): \n try:\n 
return(self.price_diff_d / self.price_open)\n except:\n return", "def get_online_price_diff_percent_method(self):\n try:\n if self.overclockerskz and self.overclockerskz.online_price:\n return int((self.get_online_price_diff_method() / self.overclockerskz.online_price) * 100)\n else:\n return 0\n except (TypeError, ValueError):\n return 0", "def percentage_complete(self) -> float:\n return self.__percentage_complete", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def difference_from_timestamp(self, timestamp: float) -> float:\n\n return self - round(timestamp * 100)", "def getPercent(*args):", "def getPercent(*args):", "def displayed_percent(self):\n return (self.displayed_words / self.total_words) * 100", "def denominator(self):\n return 1", "def percentage_change(old_value, new_value):\n\n result = float(100 * (new_value - old_value) / old_value)\n\n return result", "def rmdspe(self) -> float:\n return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)", "def percentage(self) -> str:\n return ranged_value_to_percentage(\n self._device.fan_speed_limits, self._device.fan_speed\n )", "def get_percentComplete(self):\n val = self.collection.get_cdmi_sys_meta().get(\"cdmi_percentComplete\",\n \"100\")\n return val", "def get_percent_interest(self):\n return self.__percentage_interest", "def depth_percent(self):\n return self.container['depth_percent']", "def price_diff(self):\n try:\n return(self.price_close - self.price_open)\n except:\n return", "def compare(self):\n self.PotTax_increase = self.PotTax_intervention - self.PotTax_reference\n self.PotTax_percentage = (\n (self.PotTax_increase / self.PotTax_reference) * 100)\n \"\"\"\n # this sets the PotTax_percentage to actual percentages.\n self.PotTax_percentage['TFI'] = pd.Series(\n [\"{0:.2f}%\".format(val * 100) for val in\n self.PotTax_percentage['TFI']],\n index = self.PotTax_percentage.index)\n \"\"\"\n return", "def get_estimated_percentage(self):\n now_id = now_as_id()\n message_id = self.last_message_id\n if message_id >= now_id:\n return 100.0\n \n channel_id = self.source_channel.id\n if channel_id >= message_id:\n return 0.0\n \n if self.is_polling_done():\n return 100.0\n \n return (1.0 - (now_id - message_id) / (now_id - channel_id)) * 100.0", "def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 0.0", "def __get__(self) -> float:\n\n return float(self.balance)", "def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0", "def progress(self):\n try:\n return 100.0 * (self.fields['sizeWhenDone'] - self.fields['leftUntilDone']) / float(self.fields['sizeWhenDone'])\n except ZeroDivisionError:\n return 0.0", "def get_percentComplete(self):\n val = self.resource.get_cdmi_sys_meta().get(\"cdmi_percentComplete\",\n \"100\")\n return val", "def percentage(a, b):\n return (a * 100.0) / b", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / 
self.proto.health_max", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / self.proto.health_max", "def delta(self) -> float:\n return self._delta", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def percentages(self) -> pandas.Series:\n if self._percentages is None:\n scalar = 1 if self.use_fraction else 100\n self._percentages = scalar * self.counts/self.total\n return self._percentages", "def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment_percentage\")", "def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment_percentage\")", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def completion_percent(self) -> Optional[float]:\n return pulumi.get(self, \"completion_percent\")", "def __float__(self):\n return self.num/self.denom", "def __float__(self):\n return self.num/self.denom", "def get_percentage_f_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_f)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def progress(self) -> float:\n return self._progress", "def progress(self) -> float:\n return self._progress", "def progress(self) -> float:\n return self._progress", "def progress(self) -> float:\n return self._progress", "def get_percent_completed(self):\n completed = self.object_list.filter(status__exact=True).count()\n total = len(self.object_list)\n return int(100 * completed / total) if total > 0 else 0", "def pct_helper(self,k,d,total):\n if k in d:\n return 100.0*d[k]/total\n else:\n return -100.0", "def unit_of_measurement(self) -> Any:\n return PERCENTAGE", "def pct_status(self):\r\n # DEPRECATED: self.info.n_answers will be removed\r\n # DEPRECATED: use self.t.n_answers instead\r\n if (self.info.get('n_answers')):\r\n self.n_answers = int(self.info['n_answers'])\r\n if self.n_answers != 0 and self.n_answers != None:\r\n return float(len(self.task_runs)) / self.n_answers\r\n else: # pragma: no cover\r\n return float(0)", "def get_prop(self):\n\t\tnewframe = copy.deepcopy(self)\n\t\tfor f in newframe.header[1:]:\n\t\t\tsum = newframe.sum_field(f)\n\t\t\tfor d in newframe:\n\t\t\t\ttry:\n\t\t\t\t\td[f]= d[f]/float(sum)*100\n\t\t\t\texcept ZeroDivisionError:\n\t\t\t\t\td[f] = 0\n\t\t\n\t\treturn newframe", "def mask_percentage(self):\n return 100 - self.tissue_percentage", "def GetProportion(self):\r\n\r\n return self.proportion", "def progress(self) -> int:\n return int(round(100 * self.somme() / self.finances))", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def get_attendance(self):\n\n if len(self.attendance_list):\n attendance_sum = 0\n for attendance in self.attendance_list:\n attendance_sum += attendance.attendance_state\n return attendance_sum/len(self.attendance_list) * 100\n\n else:\n return 100.0", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def mdape(self) -> float:\n return float(np.median(np.abs(self._percentage_error())) * 100)", "def 
ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol", "def ratio_value(self, langue=EN):\n if langue == EN:\n return self[EN_COUNT]-2*self[EN_ERR_COUNT]\n if langue == FR:\n return self[FR_COUNT]-2*self[FR_ERR_COUNT]", "def progress(self):\n return self.progressValue", "def percent_waiting(self):\n return self._percent_waiting", "def report_rest_percentage(self):\n self._logger.info(\"Running report for device {}\".format(self.name))\n tot_time_since_bday = (datetime.utcnow() - self.bday).total_seconds() # duration\n time_at_rest = self.total_seconds_rested\n return time_at_rest / float(tot_time_since_bday) * 100", "def get_text(self):\n # If percentage is zero, round it\n if self.percentage == 0:\n self.percentage = str(\"< 0.01\")\n text = str(self.percentage) + \"% in \" + self.name\n return text", "def get_percent_oxygen(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[1]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'po read error: {err}')\n return -1", "def calc_percent(byte_counter, data_len):\n if data_len is None or not data_len:\n # case where length is not present in metadata or zero\n return '---.-%'\n return '%6s' % ('%3.1f%%'\n % (float(byte_counter) / float(data_len) * 100.0))", "def get_status(self):\n return str(self.percentage) + \"%\", self.downloaded, self.speed", "def get_as_float(self):\n return float(self.numerator / self.denominator)", "def unit_of_measurement(self):\n return '%'", "def get_percent_change(pd_dataframe):\n time_format = \"%Y-%m-%d\"\n\n open_val = pd_dataframe.iloc[0]['Open']\n close_val = pd_dataframe.iloc[-1]['Adj Close']\n\n if open_val == 0:\n return \"N/A\"\n else:\n return round((close_val - open_val) / open_val * 100, 2)", "def denominator(self, ???):", "def get(self) -> float:\n ...", "def get_percentage_sf_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_sf)/(votes_f + votes_sf)\n return round(ratio * 100, 1)" ]
[ "0.7979501", "0.7979501", "0.766055", "0.74162847", "0.7349282", "0.71601516", "0.7119589", "0.7054972", "0.70371413", "0.70371413", "0.7024928", "0.70031303", "0.6962077", "0.69187254", "0.68619835", "0.68011016", "0.6790376", "0.676223", "0.67388844", "0.6734876", "0.6705234", "0.6704912", "0.6653166", "0.65953046", "0.65382874", "0.6506022", "0.650508", "0.6493475", "0.6490447", "0.6485712", "0.64298296", "0.6388002", "0.6380571", "0.63780177", "0.63544637", "0.63471013", "0.63471013", "0.6287243", "0.62357444", "0.62325495", "0.62298685", "0.6213588", "0.6200452", "0.6197516", "0.6189735", "0.61665446", "0.61601657", "0.61572367", "0.6146441", "0.61438", "0.6133859", "0.6132699", "0.6119379", "0.61173165", "0.6114115", "0.6114115", "0.61115676", "0.61115676", "0.61093724", "0.61034644", "0.6101184", "0.60999995", "0.60999995", "0.6095611", "0.6074444", "0.6073942", "0.6073942", "0.6070825", "0.60705924", "0.60705924", "0.60705924", "0.60705924", "0.60656965", "0.60540414", "0.6048102", "0.60378295", "0.6021293", "0.6001214", "0.5995571", "0.59804016", "0.598033", "0.59650177", "0.5960832", "0.5952361", "0.5939771", "0.5932348", "0.59320533", "0.5923751", "0.5915937", "0.5913062", "0.5910003", "0.5904784", "0.588845", "0.58771205", "0.5875814", "0.5874193", "0.5872439", "0.58688706", "0.58668274", "0.5862491" ]
0.712392
6
Getter for the score.
def get_score(self):
    return self.score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getScore(self):\r\n return self._score", "def get_score(self):\n return self.__score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def getScore(self):\n return self._score", "def get_score(self):\n\n return self._score", "def get_score(self):\n return self.score", "def get_scores(self):\n return self.score", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def get_score(self):\n return float(self._score)", "def get_score(self):\r\n return self.lcp.get_score()", "def score(self) -> int:\n return self._score", "def get_score(self):\r\n return None", "def score(self) -> str:\n return self._score", "def get_score(self) -> int:\n return self.rstate.score()", "def get_score(self):\n return tuple(self.score)", "def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0", "def getScore(data):\n return score", "def get_r_score(self):\n return self.r_score", "def score(self):\n return None", "def readScore(self):\n return self.zmwMetric(\"ReadScore\")", "def get_score(self):\r\n score = self.latest_score()\r\n return {'score': score if score is not None else 0,\r\n 'total': self._max_score}", "def score(self):\n raise NotImplementedError()", "def score(self) -> int:\n return self.__state.score()", "def getScore(self, gameState):\n\n if (self.red):\n return gameState.getScore()\n else:\n return gameState.getScore() * -1", "def get_g_score(self):\n return self._g_score", "def getScore(self):\n return sum(self.field)", "def getScore(self,board):\n return board.getScore()[self.tile]", "def elution_score(self):\n return self.score", "def get_score(self):\n\n sql = \"SELECT score FROM Users WHERE username = '\" + self.username + \"'\"\n self.cursor.execute(sql)\n return self.cursor.fetchall()[0][0]", "def get_score(self, game_state):\n if self.red:\n return game_state.get_score()\n else:\n return game_state.get_score() * -1", "def score(self) -> FAIRResultCommonScore:\n return self._score", "def get_score(self):\n return np.max(self._scores) if self._scores is not None else self._score_history[-1]", "def get_score(self, score_index) -> float:\n return self._scores[score_index - 1]", "def score(self):", "def get_score(self):\r\n if self.is_complete():\r\n score = 1\r\n elif self.is_half_complete():\r\n score = 0.5\r\n else:\r\n score = 0\r\n return {'score': score,\r\n 'total': self.max_score()}", "def getScore(self, i):\n return self.scores[i - 1]", "def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score", "def game_score(self):\n score = self.score.quantize(Decimal('0.001'))\n return score if score > 0 else 0", "def match_score(self):\n return self._match_score", "def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc", "def scores_(self):\n return self.predictor.scores_", "def read_score(self):\n file_path = 'score.txt'\n \n with open(file_path, 'r') as f:\n score = f.read()\n\n if score == '':\n return 0\n else:\n return int(score)", "def getSubmissionScore(submission):\r\n return submission.score", "def get_r2_score(self):\n return self.r2_score", "def get_score(self):\n return sum([Letters.get_value(tile.letter) for tile in self.tiles])", "def get_score(self, solution: np.array) -> float:\n pass", "def dire_score(self):\n return self._get(\"dire_score\")", "def get_score(self, player):\n if player in self.player_scores:\n return self.player_scores[player]\n else:\n raise 
Exception(\"Player not in score list\")", "def get_current_score(self):\n\n # Return the player's current turn score\n return self._current_score", "def get_score(self, card_index: int = 0) -> int:\n return self.get_score_list[card_index]", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def score(self, n):\r\n \r\n if self.scores:\r\n return self.scores[n]\r\n else:\r\n return None", "def get_score(self):\n\n # if the prev was a spare we need to get the total of 2 balls\n # and the result of the first ball added\n if self.is_prev_spare:\n if self.is_strike():\n return self._total()\n else:\n return self.first_ball * 2 + self.second_ball\n # if the prev prev was a strike it is a special case\n elif self.is_prev_strike and self.is_prev_prev_strike:\n if self.is_strike():\n return self._total()\n else:\n return self._total() * 2 + self.first_ball\n elif self.is_prev_strike and not self.is_prev_prev_strike:\n if self.is_strike():\n return self._total()\n else:\n return self._total() * 2\n pass\n else:\n # it seems we don't have a special case here\n return self.first_ball + self.second_ball", "def get_score(self, student_answers):\r\n pass", "def get(self):\n score = self._evaluate(self.y_true, self.y_pred)\n\n return score", "def getScore(self, node):\n return self.getGravityScoreFromNode(node) or 0", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def __get_score(self, game_state, move):\n return self.q_values[self.__encode_state(game_state)][move][0]", "def get_rewards_score(self) -> Address:\n return self._rewards_score.get()", "def get_vote_score(self):\n q = PostVote.objects.filter(post=self).aggregate(Sum('score'))\n return q['score__sum'] if q['score__sum'] else 0", "def score_coefficient(self):\n return self.predictor._score_coefficient", "def extract_score(self, json):\n\t\ttry:\n\t\t\treturn int(json['player_score'])\n\t\texcept KeyError:\n\t\t\treturn 0", "def getScore(self):\n\t\tself.scoreList = [submissionsss.score for submissionsss in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.scoreList", "def get_total_score(self):\n\n # Return the player's total score\n return self._total_score", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def get_score(self, a, b):\n ### FILL IN ###", "def get_score(self, node):\n return self.get_node_gravity_score(node) or 0", "def min_score(self):\n return min(self._extract_set('score') or [0])", "def max_score(self):\n return self.points", "def get_score(self, node):\r\n return self.get_node_gravity_score(node) or 0", "def boxscore(self):\n return Boxscore(self._boxscore)", "def getMatchScore(self) -> str:\n score = self.__getDataField(\"score\")\n # some matches do not have a score\n if not score:\n return None\n\n # change scome characters in the score to make it easier afterwards\n return score.strip().replace(\"–\", \"-\")", "def get_token_score(self) -> Address:\n return self._token_score.get()", "def get_scores(self) -> tuple:\n return (self.get_score(), self.p2_score)", "def update_score():\n pass", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def scoring(self):\n pass", "def get_value(self):\n #Finds all of the values in the cards\n score_list=[Card.get_value(card) 
for card in self.cards]\n #Sums the scores\n if self.num_cards() > 0:\n total_score=reduce((lambda x,y: x+y),score_list)\n return total_score\n else:\n return 0", "def score(self) -> int:\n return self.function(self.x, self.y)", "def to_score(self):\n self._bottom_tab(2)\n self._goto(\"score\")", "def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score", "def radiant_score(self):\n return self._get(\"radiant_score\")", "def calculate_score(self):\n try:\n self.score = self.__total_comment_score / float(self.num_comments)\n except ZeroDivisionError:\n self.score = float(0)", "def score_name(self) -> str:\n return self._score_name", "def get_h_score(self):\n if self._h_score is None:\n self._h_score = self._heuristic.compute(self)\n return self._h_score", "def get_score(cfg):\n key = (cfg.mut, cfg.pH)\n return lazy_load(SCORE_MAP, key, read_score, get_score_path, cfg)", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def score(self):\n return 1 if self.succeeded() else 0", "def g_score(self):\n _, _, I_CK = self._entropies()\n return 2.0 * I_CK", "def get_score(self):\r\n max_score = None\r\n score = None\r\n\r\n #The old default was None, so set to 1 if it is the old default weight\r\n weight = self.get_weight()\r\n if self.is_scored:\r\n # Finds the maximum score of all student attempts and keeps it.\r\n score_mat = []\r\n for i in xrange(0, len(self.task_states)):\r\n # For each task, extract all student scores on that task (each attempt for each task)\r\n last_response = self.get_last_response(i)\r\n score = last_response.get('all_scores', None)\r\n if score is not None:\r\n # Convert none scores and weight scores properly\r\n for z in xrange(0, len(score)):\r\n if score[z] is None:\r\n score[z] = 0\r\n score[z] *= float(weight)\r\n score_mat.append(score)\r\n\r\n if len(score_mat) > 0:\r\n # Currently, assume that the final step is the correct one, and that those are the final scores.\r\n # This will change in the future, which is why the machinery above exists to extract all scores on all steps\r\n scores = score_mat[-1]\r\n score = max(scores)\r\n else:\r\n score = 0\r\n\r\n if self._max_score is not None:\r\n # Weight the max score if it is not None\r\n max_score = self._max_score * float(weight)\r\n else:\r\n # Without a max_score, we cannot have a score!\r\n score = None\r\n\r\n score_dict = {\r\n 'score': score,\r\n 'total': max_score,\r\n }\r\n\r\n return score_dict", "def max_score(self):\n return max(self._extract_set('score') or [0])", "def update_score(self, score: int) -> int:\n self.score += score\n return self.score", "def get_score(self, obj):\r\n query = \"\"\"\r\n SELECT SUM(vote), COUNT(vote)\r\n FROM %s\r\n WHERE content_type_id = %%s\r\n AND object_id = %%s\"\"\" % qn(self.model._meta.db_table)\r\n ctype = ContentType.objects.get_for_model(obj)\r\n cursor = connection.cursor()\r\n cursor.execute(query, [ctype.id, obj._get_pk_val()])\r\n result = cursor.fetchall()[0]\r\n # MySQL returns floats and longs respectively for these\r\n # results, so we need to convert them to ints explicitly.\r\n return {\r\n 'score': result[0] and int(result[0]) or 0,\r\n 'num_votes': int(result[1]),\r\n }", "def disp_score():", "def scoreEvaluationFunction(gameState):\n return gameState.getScore()", "def getScore(self):\n tempscore = 1000 - 
0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore" ]
[ "0.91512865", "0.9107829", "0.908982", "0.908982", "0.908982", "0.9061592", "0.8963942", "0.8736967", "0.86330646", "0.8615216", "0.8579917", "0.8570744", "0.8394626", "0.8275839", "0.8265239", "0.8202533", "0.8185984", "0.8136764", "0.80746853", "0.8073835", "0.79118806", "0.78831774", "0.78378874", "0.7799484", "0.7703063", "0.7699903", "0.7696578", "0.7686645", "0.76244515", "0.7621472", "0.7606836", "0.7591086", "0.7584187", "0.7578819", "0.75570977", "0.75311476", "0.75089586", "0.74512535", "0.74156123", "0.7410601", "0.7363334", "0.73618656", "0.73347896", "0.7269247", "0.72618526", "0.7251393", "0.7230929", "0.72146004", "0.72121656", "0.7210231", "0.7205862", "0.72047997", "0.7133028", "0.7131643", "0.7122917", "0.71124196", "0.7067501", "0.70163304", "0.70121133", "0.70081043", "0.7005904", "0.6994214", "0.69761735", "0.69409597", "0.69291097", "0.6920399", "0.6913965", "0.69110227", "0.6908091", "0.69066685", "0.6895294", "0.68766886", "0.68712455", "0.68712294", "0.68655634", "0.68625385", "0.6856846", "0.6847928", "0.6828036", "0.6822452", "0.6815012", "0.6809374", "0.6804348", "0.6803459", "0.68009967", "0.6796584", "0.67941684", "0.6792874", "0.6790092", "0.678586", "0.6781815", "0.67598623", "0.67426544", "0.6741009", "0.67273116", "0.6706555", "0.6703041", "0.66963136" ]
0.91986686
2
Function that prints the biosafe output. Useful for doing multiple runs (e.g. Monte Carlo); not called in Virtual River.
def print_output(self):
    print("Reference score: " + str(self.PotTax_reference.sum().TFI))
    print("Intervention score: " + str(self.PotTax_intervention.sum().TFI))
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_out():\n pass", "def print(self):\r\n self.print_avec_separateur()", "def print(self):\n self.print_avec_separateur(\" \")", "def printOutput(self):\n pass", "def display():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # Call the write method with sys.stdout as the file.\n write(file=sys.stdout)", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def Print(self, f=sys.stdout):\n print>>f, \"\"\n print>>f, \"Item number:\", self.n.encode('utf8')\n print>>f, \"Auction:\", self.auction.encode('utf8')\n print>>f, \"Lot number:\", self.nlot.encode('utf8')\n print>>f, \"Lot:\", self.lot.encode('utf8')\n print>>f, \"Start price:\", self.startPrice.encode('utf8')\n print>>f, \"Organizer:\", self.organizer.encode('utf8')\n print>>f, \"Application end date:\", self.applicationEndDate.encode('utf8')\n print>>f, \"Auction date:\", self.date.encode('utf8')\n print>>f, \"State:\", self.state.encode('utf8')\n print>>f, \"Winner:\", \n if self.winner: print>>f, self.winner.encode('utf8')\n else: print>>f, \"\"", "def print_standout(info):\n sys.stdout.write(\"Info: %s\" % info)\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def dump_to_console(name, results, benchmark):\r\n for a in affinities:\r\n print(\" \"+a)\r\n res = results[a]\r\n print(\"\\t\"+\"#thr\"+\"\\t\"+\"time\"+\"\\t\"+\"spdup\"+\"\\t\"+\"effc\"+\"\\t\"+\"raw\")\r\n for i in range(len(res[\"threads\"])):\r\n print(\"\\t{0}\\t{1:.2f}\\t{2:.2f}\\t{3:.4f}\\t{4}\".format(\r\n res[\"threads\"][i],\r\n res[\"avg\"][i],\r\n res[\"speedup\"][i],\r\n res[\"efficiency\"][i],\r\n benchmark[a][res[\"threads\"][i]]))\r\n print()", "def text_output(self):\n print(self.board)\n print()", "def stdout(self):\n pass", "def print_banner(self):\n print \":##::::'##::'#######::'########:::::::::::::::'###::::'########::'####:\\n\\\n:###::'###:'##.... ##: ##.....::::'##::::::::'## ##::: ##.... ##:. ##::\\n\\\n:####'####: ##:::: ##: ##::::::::: ##:::::::'##:. ##:: ##:::: ##:: ##::\\n\\\n:## ### ##: ##:::: ##: ######:::'######::::'##:::. ##: ########::: ##::\\n\\\n:##. #: ##: ##:::: ##: ##...::::.. ##.::::: #########: ##.....:::: ##::\\n\\\n:##:.:: ##: ##:::: ##: ##::::::::: ##:::::: ##.... ##: ##::::::::: ##::\\n\\\n:##:::: ##:. 
#######:: ##:::::::::..::::::: ##:::: ##: ##::::::::'####:\\n\\\n:..:::::..:::.......:::..:::::::::::::::::::..:::::..::..:::::::::....:\"", "def output(*args):\n print(*args, end='', file=file)", "def banner_ascii():\n print(\"\")\n print(f\"\\n{RED} Steganography Tool{RESET}\")\n print(f\"{RED} Made By {RESET}\")\n print(f\"{RED} Ehthe Samul Islam Laskar USN:1DS16CS712 {RESET}\")\n print(f\"{RED} B Padma USN:1DS19CS420{RESET}\")\n print(f\"{RED} Nikhil D Kanyal USN:1DS17CS731{RESET}\")\n print(f\"{YELLOW}Type 'help' to see commands{RESET}\")", "def Print(self):\n\n\t\tif self.verbose:\n\n\t\t print (\"\\033[1m[HEADER]\\033[0m\")\n\t\t print (\"code:\\t\\t%s\" % self.kod)\n\t \tprint (\"version:\\t%s\" % self.ver)\n\t\t print (\"date and time:\\t%s\" % self.probid)\n\t\t print (\"dump number:\\t%s\" % self.knod)\n\t \tprint (\"number of histories:\\t%s\" % self.nps)\n\t\t print (\"number of pseudorandom numbers used:\\t%s\" % self.rnr)\n\t\t print (\"title: %s\" % self.title)\n\n\t\t if self.ntal>1:\n\t\t\t\tprint self.ntal, 'tallies:', self.ntals\n\t \telse:\n\t\t\t\tprint self.ntal, 'tally:', self.ntals\n\n\n\t\t if self.npert != 0:\n\t\t\t\tprint(\"number of perturbations: %s\" % self.npert)", "def _print_output(*args):\n for arg in args:\n print(arg)\n print('\\n')", "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "def print_status(self):\r\n\t\tif VERBOSE:\r\n\r\n\t\t\tprint( 'Player : ')\r\n\t\t\tfor h in self.hands:\r\n\t\t\t\tprint('\\t' + str(h))\r\n\t\t\tprint( 'Dealer:\\n\\t' + str(self.dealer))\r\n\t\t\tprint( '-----------------------')", "def print_hand(self):\n if self.cheating:\n print(\"You're cheating!\")\n print(\"until you reroll it!\")\n print(\"\"\"\nYou rolled:\na = [ {} ]\nb = [ {} ]\n\nYou are in Stage {}\n \"\"\".format(self.die_a, self.die_b, self.stage))", "def main():\n print \"Printing Sample Status\"", "def console(self):\n fricas_console()", "def out(*args):\r\n print(*args)", "def display(self) -> str:\n lines, _, _, _ = self._display_aux()\n return '\\n'.join(lines)", "def help_dump(self):\n print(DUMP)", "def _printable(self):\n toPrint = \"Command Header. 
Qubit ID: \" + str(self.qubit_id) + \" \"\n toPrint = toPrint + \"Instruction: \" + str(self.instr) + \" \"\n toPrint = toPrint + \"Notify: \" + str(self.notify) + \" \"\n toPrint = toPrint + \"Block: \" + str(self.block) + \" \"\n toPrint = toPrint + \"Action: \" + str(self.action)\n return toPrint", "def genout(self):\n ch = self.buffer_output()\n while ch:\n print(ch, end='')\n ch = self.buffer_output()", "def _printable(self):\n toPrint = \"Qubit ID: \" + str(self.qubit_id) + \" \"\n toPrint = toPrint + \"Outcome: \" + str(self.outcome) + \" \"\n toPrint = toPrint + \"Remote App ID: \" + str(self.remote_app_id) + \" \"\n toPrint = toPrint + \"Remote Node: \" + str(self.remote_node) + \" \"\n toPrint = toPrint + \"Remote Port: \" + str(self.remote_port) + \" \"\n toPrint = toPrint + \"Datetime: \" + str(self.datetime)\n return toPrint", "def print_output():\n\tprint ''.join([str(x)+\"\" for x in output])", "def bpprint(self, out=None):\n if out is None:\n out = sys.stdout\n print(self.bpformat(), file=out)", "def _printable(self):\n pass", "def print(cls, vas):\n print(vas)", "def _printable(self):\n\n toPrint = \"Xtra Qubit: \" + str(self.qubit_id) + \" \"\n toPrint = toPrint + \"Angle Step: \" + str(self.step) + \" \"\n toPrint = toPrint + \"Remote App ID: \" + str(self.remote_app_id) + \" \"\n toPrint = toPrint + \"Remote Node: \" + str(self.remote_node) + \" \"\n toPrint = toPrint + \"Remote Port: \" + str(self.remote_port) + \" \"\n toPrint = toPrint + \"Command Length: \" + str(self.cmdLength)\n\n return toPrint", "def printPokemon():\n print(\" _ \")\n print(\" _ __ ___ | | _____ _ __ ___ ___ _ __ \")\n print(\" | '_ \\ / _ \\| |/ / _ \\ '_ ` _ \\ / _ \\| '_ \\ \")\n print(\" | |_) | (_) | < __/ | | | | | (_) | | | |\")\n print(\" | .__/ \\___/|_|\\_\\___|_| |_| |_|\\___/|_| |_|\")\n print(\" |_| \")", "def printwf(data):\n print data #replace for Py3\n sys.stdout.flush()\n sys.stderr.flush()", "def print_command(self):\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n print(' '.join(command))", "def result_display(self, arg):\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)", "def debug_print(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n\n print('\\nPosition')\n print(self.tetromino.position())\n print('\\nBlock coordinates')\n print(self.tetromino.block_coordinates())\n print('\\nBoard')\n print(self.board)\n print('\\nBoard heights')\n print(self.board.get_height())\n\n if self.pause:\n print('\\nPaused')", "def banner():\n print(\"\\033[32m\")\n print(\" ___ _ ___ _ _ _\")\n print(\" | _ )_ _ _ _| |_ ___ | __| |_ ___ _ _ _ _ __ _| | | | ___ ___ _ __\")\n print(\" | _ \\ '_| || | _/ -_) | _|| _/ -_) '_| ' \\/ _` | | | |__/ _ \\/ _ \\ '_ \\\\\")\n print(\" |___/_| \\_,_|\\__\\___| |___|\\__\\___|_| |_||_\\__,_|_|_|____\\___/\\___/ .__/\")\n print(\" |___| |_|\")\n print(\"\\033[0m\")", "def print(self):\n print(self.pretty_str())", "def eprint(sev:str, text:str) -> None:\n print(\"zeropage-rom-generator-{}: {}\".format(sev, text), file=sys.stderr)\n if sev == \"E\":\n sys.exit(1)", "def print_start_game():\n print(HANGMAN_ASCII_ART)\n print(MAX_TRIES)", "def test_print(chikin):\n chikin.print()", "def print_cust(self, msg):\n print(msg, end='')", "def nice_output(self):\n return 'Inning {0}'.format(self.num)", "def print_outcome(self) -> None:\n pass", "def print(self):\n # Your 
implementation here", "def print_summary_fuel_reactor(fs):\n print(\"\\nResults:\")\n print(\"==========================================\")\n print(\"---Moving Bed Fuel Reactor---\") \n \n print(\"\\nInlet gas: \", \n \"\\nCO2: \", value(fs.MB_fuel.F[0,'CO2']), \"mol/s\",\n \"\\nH20: \", value(fs.MB_fuel.F[0,'H2O']), \"mol/s\",\n \"\\nCH4: \", value(fs.MB_fuel.F[0,'CH4']), \"mol/s\",\n \"\\nCO2: \", value(fs.MB_fuel.Gas_M[0,'CO2']), \"kg/s\",\n \"\\nH20: \", value(fs.MB_fuel.Gas_M[0,'H2O']), \"kg/s\",\n \"\\nCH4: \", value(fs.MB_fuel.Gas_M[0,'CH4']), \"kg/s\")\n print(\"\\nOutlet gas: \", \n \"\\nCO2: \", value(fs.MB_fuel.F[1,'CO2']), \"mol/s\",\n \"\\nH20: \", value(fs.MB_fuel.F[1,'H2O']), \"mol/s\", \n \"\\nCH4: \", value(fs.MB_fuel.F[1,'CH4']), \"mol/s\",\n \"\\nCO2: \", value(fs.MB_fuel.Gas_M[1,'CO2']), \"kg/s\",\n \"\\nH20: \", value(fs.MB_fuel.Gas_M[1,'H2O']), \"kg/s\", \n \"\\nCH4: \", value(fs.MB_fuel.Gas_M[1,'CH4']), \"kg/s\")\n print(\"\\nInlet solids: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_F[1,'Fe2O3']), \"mol/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_F[1,'Fe3O4']), \"mol/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_F[1,'Al2O3']), \"mol/s\",\n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_M[1,'Fe2O3']), \"kg/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_M[1,'Fe3O4']), \"kg/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_M[1,'Al2O3']), \"kg/s\")\n print(\"\\nOutlet solids: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_F[0,'Fe2O3']), \"mol/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_F[0,'Fe3O4']), \"mol/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_F[0,'Al2O3']), \"mol/s\",\n \"\\nFe2O3: \", value(fs.MB_fuel.Solid_M[0,'Fe2O3']), \"kg/s\",\n \"\\nFe3O4: \", value(fs.MB_fuel.Solid_M[0,'Fe3O4']), \"kg/s\", \n \"\\nAl: \", value(fs.MB_fuel.Solid_M[0,'Al2O3']), \"kg/s\") \n \n print(\"\\nGas inlet velocity: \", value(fs.MB_fuel.vg[0]), \"m/s\")\n print(\"Gas outlet velocity: \", value(fs.MB_fuel.vg[1]), \"m/s\")\n print(\"Solids velocity: \", value(fs.MB_fuel.vs), \"m/s\") \n \n print(\"\\nHeat of reaction @ z=0: \", \n value(fs.MB_fuel.DH_rxn_s[0]), \"J/(mol reaction)\")\n print(\"Heat of reaction @ z=1: \", \n value(fs.MB_fuel.DH_rxn_s[1]), \"J/(mol reaction)\")\n \n print(\"\\nCH4 conversion: \", value(fs.MB_fuel.X_gas)*100, \" %\")\n print(\"Fe2O3 conversion: \", value(fs.MB_fuel.X_OC)*100, \" %\")\n \n print('\\nPressure @inlet: ', value(fs.MB_fuel.P[0]))\n print('Pressure @outlet: ', value(fs.MB_fuel.Gas_Out_P))\n \n print(\"\\nReactor bed height:\", value(fs.MB_fuel.L), \" m\")\n print(\"Reactor bed diameter:\", value(fs.MB_fuel.Dr), \" m\")\n# print(\"Refractory wall thickness\", value(fs.MB.refractory_th), \" m\")\n \n print(\"\\nInlet gas flow:\", value(fs.MB_fuel.Gas_In_F), \" mol/s\")\n print(\"Outlet gas flow:\", value(fs.MB_fuel.Ftotal[1]), \" mol/s\")\n print(\"Inlet solids flow:\", value(fs.MB_fuel.Solid_In_M), \" kg/s\")\n print(\"Outlet solids flow:\", value(fs.MB_fuel.Solid_Out_M), \" kg/s\")\n print(\"Inlet solids temperature:\", value(fs.MB_fuel.Solid_In_Ts), \" K\")\n print(\"Outlet solids temperature:\", value(fs.MB_fuel.Solid_Out_Ts), \" K\")\n \n print(\"Inlet gas temperature:\", value(fs.MB_fuel.Tg[0]), \" K\")\n print(\"Outlet gas temperature:\", value(fs.MB_fuel.Tg[1]), \" K\") \n \n print(\"\\nInlet solid mass fractions: \", \n \"\\nFe2O3: \", value(fs.MB_fuel.x[1,'Fe2O3']),\n \"\\nFe3O4: \", value(fs.MB_fuel.x[1,'Fe3O4']), \n \"\\nAl2O3: \", value(fs.MB_fuel.x[1,'Al2O3']))\n print(\"Outlet solid mass fractions: \", \n \"\\nFe2O3: \", 
value(fs.MB_fuel.x[0,'Fe2O3']),\n \"\\nFe3O4: \", value(fs.MB_fuel.x[0,'Fe3O4']), \n \"\\nAl2O3: \", value(fs.MB_fuel.x[0,'Al2O3']))", "def emu_print(text):\n print \"%s %s\" % (EMU_PRINT_PREFIX, text)", "def print_instrumented(self) -> None:\n\n print(\n \"\\n\".join(\n f\"{t[1].__class__.__name__} of {t[1].__class__.__module__} component: \"\n f\"{str(t[0])}\" for t in self.instrumented()\n )\n )", "def print_frames(frames):\n for i, frame in enumerate(frames):\n clear_output(wait=True)\n print(frame['frame'])\n print(f\"Episode: {frame['episode']}\")\n print(f\"Timestep: {i + 1}\")\n print(f\"State: {frame['state']}\")\n print(f\"Previous action: {frame['action']}\")\n if frame['action'] == 0:\n print(\"Action is: south\")\n if frame['action'] == 1:\n print(\"Action is: north\")\n if frame['action'] == 2:\n print(\"Action is: east\")\n if frame['action'] == 3:\n print(\"Action is: west\")\n if frame['action'] == 4:\n print(\"Action is: pickup passenger 1 \") \n if frame['action'] == 5:\n print(\"Action is: dropoff passenger 1\")\n if frame['action'] == 6:\n print(\"Action is: pickup passenger 2\")\n if frame['action'] == 7:\n print(\"Action is: dropoff passenger 2\")\n print(f\"Reward: {frame['reward']}\")\n print(f\"Total Reward: {frame['total reward']}\")\n time.sleep(.5)", "def _verbose(self,text):\n if self.verbose:\n print(text)", "def printout(*args, **kwargs):\n console_print(sys.stdout, *args, **kwargs)", "def print_post():\n print('| | |'),", "def nice_output(self):\n return self.des", "def nice_output(self):\n return self.des", "def display(self):\n disptxt = str(self)\n if self.width == 0 or self.has_output:\n print(disptxt)\n else:\n print(\"\\r\", end='')\n print(disptxt, end='')\n sys.stdout.flush()", "def display(self):\n print(self)", "def cool_print(self, text=str, newline=True, margin=21, rate=.02):\n print(\" \" * margin, end='')\n for letter in text:\n sleep(.02)\n stdout.write(letter)\n stdout.flush()\n if newline:\n print()", "def show_main_screen():\n option = algo_selection(algos)\n if option == 1:\n print_factorial()\n show_main_screen()\n if option == 2:\n print_gcd()\n show_main_screen()\n if option == 3:\n print_pow()\n show_main_screen()\n if option == 4:\n print_towers()\n show_main_screen()\n if option == 5:\n print_permutations()\n show_main_screen()\n if option == 6:\n raise SystemExit(0)", "def print_results(self):\n pass", "def console(out):\n logging.debug(out)\n try:\n print(out)\n except UnicodeEncodeError:\n print(re.sub(r'([^\\s\\w]|_)+', '', out))", "def show_banner():\n print(\"\"\"\n _ _ _ _ _____ _______\n| | | | / \\ | | |_ _\\ \\ / / ____|\n| |_| | / _ \\ | | | | \\ \\ / /| _|\n| _ |/ ___ \\| |___ | | \\ V / | |___\n|_| |_/_/ \\_\\_____|___| \\_/ |_____|\n\n\nA super fast asynchronous http and https prober, to check who is (h)alive.\nDeveloped by gnc\n \"\"\")", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def print_methods():\n print('''1. Sobol Variance Based:\n first and total order''')\n print('''2. Regional Sensitivity Analysis:\n also called Monte Carlo Filtering''')\n print('''3. Morris Screening Method:\n with pre-optimized defined trajects and group option''')\n print('''4. Sampled-OAT:\n Latin HYpercube or Sobol sampling with OAT sensitivity''')\n print('''5. Standardized Regression Coefficients:\n Latin HYpercube or Sobol sampling with linear regression''')\n print('''6. 
DYNamic Identifiability Analysis:\n Latin HYpercube or Sobol sampling with time-sliced based\n evaluation''')", "def p(self):\n self.printstdout = True", "def _print_custom(self):\n pass", "def showme(message):\n print(message)", "def printInfo():\n print('\\t' * 6 + 'Combinational Circuit Paths')\n\n print('-' * 75)\n\n print('Input: Verilog file with Gate Level Modelling')\n print('Output: All paths from input to output of the circuit described by the Verilog file')\n print('(Optional: Graph of the circuit can also be exported)')\n\n print('-' * 75, end='\\n\\n')", "async def print_processor(self) -> None:\n try:\n while True:\n while self.print_queue.empty() is not True:\n stub = await self.print_queue.get()\n if isinstance(stub, str):\n print(stub)\n elif isinstance(stub, tuple):\n if stub[0] == \"error\":\n print(f\"{r}{stub[1]}{reset}\")\n elif stub[0] == \"warning\":\n print(f\"{y}{stub[1]}{reset}\")\n elif stub[0] == \"success\":\n print(f\"{g}{stub[1]}{reset}\")\n elif stub[0] == \"bold\":\n print(f\"{bold}{stub[1]}{reset}\")\n else:\n print(f\"{stub[1]}\")\n self.print_queue.task_done()\n await asyncio.sleep(0.002)\n except asyncio.CancelledError:\n print('Closing the RedCisco application... Cleaning up running tasks...\\n')", "def display_collected():\n os.system('clear') # clearscreen\n print('BS4 widget generator')\n print('-' * 20)\n print('options selected:')\n for col in collected:\n print(col)\n\n print('-' * 20)\n\n return", "def test_perform_display_print(capsys):\n assert sync_perform(stdio_dispatcher, Effect(Display(\"foo\"))) is None\n out, err = capsys.readouterr()\n assert err == \"\"\n assert out == \"foo\\n\"", "def display_message():\n\tprint(\"Learnt to write functions, which are named blocks of code that are designed to do one specific job.\")", "def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... ', self.app.IGNORE_EXIT_FLAG)", "def print_banner(message):\n\n print(\"#############################################################################\")\n print(message)", "def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')", "def _printable(self):\n toPrint = \"Measurement Outcome header. 
\"\n toPrint += \"measurement outcome: \" + str(self.outcome) + \" \"\n\n return toPrint", "def print_header(self):\n print(\"Running {} simulations.\".format(self.num_simulations))\n print(\"{0:2}% bias for men\".format(self.promotion_bias))\n print(\"{0:2} promotion cycles\".format(self.iterations_per_simulation))\n print(\"{0:2}% attrition rate\".format(self.attrition))\n print", "def printhelp():", "def print_(self, s: str) -> None:", "def print(self) -> None:\n\n print(\"Name: {}\".format(self.name))\n print(\"Input Queue: {}\".format(self.input_queue))\n print(\"Output Queue: {}\".format(self.output_queue))\n print(\"Restart Required: {}\".format(str(self.restart_required)))\n print(\"Number of Processes: {}\".format(str(self.num_processes)))\n print(\"Process Job: {}\".format(self.process_job.__name__))\n print(\"Timeout Duration: {}\".format(str(self.timeout_duration)))\n self.print_process_list()", "def do_show(self, arg):\n obj = self.verify(arg, 1)\n if obj:\n print(obj)", "def printout(string):\n print(string)", "def do_print(self, line):\n cmd_args = io.parse_cmd_args(line, io.output_cmd_pattern)\n if cmd_args:\n success = self.manager.print_to_console(\n cmd_args.get('target'), \n cmd_args.get('filters')\n )\n if success:\n self.console_print(\"There, you asked for it!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "def print_intro(self):\n \n print('Did you know that birds hold the record for longest animal migrations?')", "def print_info(*args):\n print(CGREEN2 + str(*args) + CEND)", "def display_hangman(self):\n print(Fore.CYAN + HANGMAN_PICS[self.stage])\n print('\\n')\n print(self.progress + Style.RESET_ALL)\n print('\\n')", "def printme(self):\n sys.stdout.write(self._header)\n for k in range(len(self)):\n sys.stdout.write(self.line(k))", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def print(self):\n if self.passed():\n self.print_passed()\n else:\n self.print_failed()", "def print(text):\n\n return builtin_print('{} | {}'.format(\n time.strftime('%H:%M:%S', time.gmtime()),\n text\n ))", "def show(self):\n\n print(self._walk(self, depth=1))", "def run(self):\n logging.debug('Displaying Info: ' + self.recipe.name)\n\n msg = PREFIX[1:] + PREFIX.join(self.recipe.info().split('\\n'))\n print(msg)\n return msg", "def printMe():\n\n print(\"Meeeeeee!\")", "def display(self,message):\r\n \r\n print(message)", "def printMe(self):\n tempDict = self.whoAreYou()\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.getInitParams()\n self.raiseADebug(' Initialization Parameters:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.myCurrentSetting()\n self.raiseADebug(' Current Setting:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))", "def spew(self):\n for frame in self.frames:\n print frame.func, frame", "def print(self):\n\n print(self)", "def console(self, vm=None):\n raise NotImplementedError\n return \"\"", "def br(cls):\n term_width = get_terminal_width()\n\n if hasattr(cls, 
'info'):\n cls.info('-' * term_width)\n else:\n print('-' * term_width)" ]
[ "0.7084671", "0.67312264", "0.6478312", "0.6462846", "0.64194113", "0.6415898", "0.6407844", "0.6329717", "0.630891", "0.63030106", "0.62839437", "0.6273448", "0.6249168", "0.61708874", "0.61392146", "0.6125898", "0.6113273", "0.61126554", "0.61101013", "0.6100479", "0.609391", "0.60599005", "0.6027059", "0.6014493", "0.59612733", "0.5949467", "0.59388554", "0.5929722", "0.5922616", "0.5917826", "0.5915018", "0.5895304", "0.58932567", "0.5888682", "0.5879076", "0.5852637", "0.58518887", "0.5845818", "0.58197427", "0.5815509", "0.581441", "0.58069146", "0.5804937", "0.58003926", "0.5795833", "0.5786301", "0.57555115", "0.5754944", "0.5747232", "0.57458735", "0.57448673", "0.5732716", "0.5731358", "0.57234746", "0.572122", "0.57191426", "0.57191426", "0.5718518", "0.57180816", "0.5710347", "0.57088345", "0.5708402", "0.5700567", "0.5699961", "0.56916296", "0.5690912", "0.56876165", "0.5686628", "0.5684875", "0.5684097", "0.5675259", "0.5673532", "0.56714636", "0.567125", "0.56693614", "0.5663493", "0.5662185", "0.56597954", "0.5638522", "0.5637906", "0.5636644", "0.5632781", "0.5632561", "0.5629126", "0.56259674", "0.5621874", "0.5614039", "0.5613517", "0.5610851", "0.5605608", "0.56034756", "0.5601655", "0.5597128", "0.5589323", "0.55866355", "0.5585909", "0.5585599", "0.5583817", "0.5583579", "0.5580666", "0.5579013" ]
0.0
-1
Function to test the code separately from the Virtual River.
def test():
    root_path = os.path.dirname(os.path.realpath(__file__))
    test_path = os.path.join(root_path, 'test_files')
    with open(os.path.join(test_path, 'hexagons0.geojson')) as f:
        hexagons_old = load(f)
    with open(os.path.join(test_path, 'hexagons1.geojson')) as f:
        hexagons_new = load(f)
    return hexagons_new, hexagons_old
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_01_lighting(self):", "def _lv_test(self):\n raise NotImplementedError('Levene Test is not implemented')", "def runTest(self):\n E = main()\n self.assertInside(E, energy, 1e-5)", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def runtest(self):", "def test(self):\n pass", "def test_post_voltage_maps(self):\n pass", "def test():\n pass", "def test_init_with_fire_villan(self):\n pass", "def test(self):\n raise NotImplementedError", "def test_bed(self):\n #TODO write bed tests", "def main():\n testlib = VorpatestLibrary()\n testlib.prepare_test()\n testlib.run_vorpaline(*sys.argv[1:])\n testlib.run_vorpastat()\n testlib.cleanup_test()", "def test():", "def test():", "def test_alien_data(self):", "def tests():", "def test_robot(r, c):\n return r['x'] == c or r['y'] == c\n \n \n \n \n \n \n \n \n \n # Make sure tests run when this module is run", "def unitary_test():", "def test_01_visit(self):", "def run_test(self):\n raise NotImplementedError", "def test_ipam_vrfs_update(self):\n pass", "def runTest(self):\n self.setUp()\n self.test_visuThreeD1()", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def test_get_voltage_maps(self):\n pass", "def test_runGame(self):\n # this is tested by playing the game. No good way to unit test this.\n pass", "def inner_test():\n pass", "def inner_test():\n pass", "def runTest(self):\n self.setUp()\n self.test_FiberDistance1()", "def test_theft_and_stealing(self):", "def runTest(self):\n return True", "def test_T4():", "def test_T4():", "def runTest(self):\r\n self.setUp()\r\n self.test_CreateROI1()", "def test(self) -> Any:\n pass", "def test(self):", "def test(self):", "def test_arc_smear(self):", "def main():\r\n test = TesterNeighbour()\r\n test.setUp()\r\n test.test_result_n()\r\n print(\"result_of_algorithm_test - passed\")", "def test_predictor():", "def testRobotInterface():\n\n print\n print \"TEST: simulation environment\"\n print \"isSimulated =\", robot.isSimulated()\n print\n\n print \"TEST: shutdown flag\"\n print \"isShutDown =\", robot.isShutDown()\n assert(robot.isShutDown() == False)\n print\n\n print \"TEST: own robot ID\"\n print robot.myRobotId()\n print\n\n print \"TEST: stop (nothing should happen)\"\n robot.stop()\n print\n\n print \"TEST: enable ballHandlers\"\n robot.enableBallHandlers()\n sleep(1)\n print\n\n print \"TEST: disable ballHandlers\"\n robot.disableBallHandlers()\n sleep(1)\n print\n\n print \"TEST: velocity setpoint (half rotation)\"\n robot.setVelocity(0, 0, 1, 3.14)\n print\n\n print \"TEST: get current position\"\n pos = robot.getPosition()\n print \"position =\", pos\n print\n\n print \"TEST: get current velocity\"\n vel = robot.getVelocity()\n print \"velocity =\", vel\n print\n\n print \"TEST: move a bit\"\n robot.move(pos.x, pos.y + 0.5, pos.Rz)\n print\n\n print \"TEST: which robots are active\"\n print robot.activeRobots()\n print\n\n print \"TEST: teammembers are all robots except self\"\n teamMembers = robot.teamMembers()\n print teamMembers\n assert(robot.myRobotId() not in teamMembers)\n print\n\n print \"TEST: relative index\"\n print robot.myRelIndex()\n print\n\n print \"TEST: ball possession as enum\"\n print robot.ballPossession()\n print\n\n print \"TEST: ball possession as boolean\"\n print robot.hasBall()\n print\n\n print \"TEST: does team see a ball\"\n print robot.seeBall()\n print\n\n print \"TEST: closest obstacle\"\n print 
robot.findClosestObstacle(1, 6)\n print\n\n print \"TEST: robot close to penalty spot\"\n print (robot.robotCloseBy(0, 6, 2.0) or robot.robotCloseBy(0, -6, 2.0))\n print\n\n if robot.seeBall():\n print \"TEST: ball position\"\n print robot.ballPosition()\n print\n\n print \"TEST: ball velocity\"\n print robot.ballVelocity()\n print\n\n print \"TEST: ball on same half\"\n print robot.ballOnSameHalf()\n print\n\n print \"TEST: ball close to penalty spot\"\n print (robot.ballCloseBy(0, 6, 2.0) or robot.ballCloseBy(0, -6, 2.0))\n print\n\n print \"TEST: get ball\"\n robot.getBall()\n sleep(0.1) # give async wm thread a bit of time ...\n assert(robot.hasBall())\n print", "def test_alive():\n pass", "def test_alive():\n pass", "def test_alive():\n pass", "def test_let(self):", "def runTests(self):\n \n pass", "def test_T01():", "def test_vargs(self):", "def test_ipam_vrfs_create(self):\n pass", "def probe(self):", "def test_basic_execution(self):", "def test_uparforvarg(self):", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def test_brains_get(self):\n pass", "def main():\n init_node(\"descartes_teleop_testing\")\n # Assign inspection parameters for the class\n params = {'world': \"/table_link\",\n 'group': \"left_ur5\",\n 'clear': True,\n 'print_messages': True,\n 'debug': False}\n\n teleop = VFNavigation(params=params)\n teleop.define_inspection(client='polar_camera',\n inspection_name='ar_2_camera')\n for key in ['y+', 'y-', 'z+', 'z-', 'x+', 'x-', 'x+', 'x+']:\n response = teleop.pose_nav(command=key)\n Me.info_message(response)", "def test_something():", "def test_emirp_check():\r\n pass", "def testing(self):\n print('test successful')", "def main():\n # Call testing function\n testMinivan()", "def test_get_waivers(league):\n pass", "def setUp(self):\n self.validate_with = outside_data.Bed(source=\"get_val_ven_test.bed\", files_path=self.files_path,\n folder=\"Validation_files\")\n self.vencode_obj = iext.GetVencodes(validate_with=(self.validate_with, (\":\", r\"\\..\", \",\")),\n inputs=self.inputs,\n files_path=self.files_path,\n cell_type=self.celltype_analyse,\n algorithm=self.algorithm,\n n_regulatory_elements=self.k,\n number_vencodes=4,\n thresholds=self.thresholds, n_samples=10000,\n merge={\"replicate_suffix\": self.replicate_suffix})\n self.vencodes = self.vencode_obj.coordinates", "def run_tests():\n good_car = UnreliableCar(\"Good Car\", 100, 90)\n bad_car = UnreliableCar(\"Bad Car\", 100, 10)\n\n for i in range(1, 15):\n print(\"Attempting to drive {}km:\".format(i))\n print(\"{:12} drove {:2}km\".format(good_car.name, good_car.drive(i)))\n print(\"{:12} drove {:2}km\".format(bad_car.name, bad_car.drive(i)))\n\n \"\"\"final states of the cars\"\"\"\n print(good_car)\n print(bad_car)", "def runTest(self):\n self.setUp()\n self.test_NeuroPath1()", "def test():\r\n pass", "def test_script(self) -> None:\n main()", "def test_code005(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code005 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')\n\n # specific VNF recovery monitoring, specific metrics if any\n # interact with ONAP, periodic query about VNF status; may also check VM or container status directly with VIM\n # return when VNF is recovered\n # may provision for failure to recover (max time to wait; return code: recovery OK boolean)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # VM is created arbitrarily, not yet with ONAP\n # Openstack cloud was created by Fuel/MCP, 
descriptor in clouds.yaml file\n # VM resume done in Horizon (to simulate an ONAP-based recovery)\n # retrieved status values: {'ACTIVE', 'SUSPENDED'}\n # loop: wait 2 seconds, check status, stop loop when status is ACTIVE\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n test_VM_current_status = test_VM.status\n wait_seconds = 2\n nb_seconds_waited = 0\n while test_VM_current_status != 'ACTIVE':\n print(' waiting',wait_seconds,'seconds...')\n time.sleep(wait_seconds)\n test_VM = conn.compute.get_server(test_VM_ID) # need to get VM object ID, for an updated status attribute\n test_VM_current_status = test_VM.status\n nb_seconds_waited = nb_seconds_waited + wait_seconds\n print(' nb_seconds_waited=',nb_seconds_waited)", "def test_ipam_vrfs_list(self):\n pass", "async def test_routine(self):\n print('Running test routine...')\n print('Waiting for axes to initialize...')\n await self.robot.wait_until_initialized()\n print('Synchronizing robot state with peripheral...')\n await self.robot.synchronize_values()\n print('Loading calibration data...')\n await self.robot.load_calibrations()\n await self.robot.go_to_alignment_hole()\n\n print('Starting 96-well plate test...')\n await self.robot.go_to_96_well_plate(1, 'a')\n await self.robot.dispense('96-well plate', 'far above')\n for height in ['bottom', 'low', 'mid', 'high', 'top', 'above', 'far above']:\n print('Testing with height {}...'.format(height))\n for (row, volume) in [('a', 20), ('b', 30), ('c', 40), ('d', 50), ('e', 100)]:\n print(\n ' Testing precise with row {} and volume {} mL...'\n .format(row, volume)\n )\n await self.test_individual_precise(row, height, volume / 1000)\n await self.robot.dispense('96-well plate', height)\n for (row, volume) in [\n ('f', 100), ('g', 150), ('h', 200), ('a', 300), ('b', 400),\n ('c', 500), ('d', 600), ('e', 700), ('g', 800), ('h', 900)\n ]:\n print(\n ' Testing rough with row {} and volume {} mL...'\n .format(row, volume / 1000)\n )\n await self.test_individual_rough(row, height, volume / 1000)\n await self.robot.z.go_to_high_end_position()\n await self.robot.y.go_to_low_end_position()\n\n print(batch.OUTPUT_FOOTER)\n print('Quitting...')", "def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)", "def test_03_visit_special(self):", "def test_stub(self):\n pass", "def test_gameHandleEvents(self):\n # this kinda gonna be reiterating the other tests??\n # the tests of all the individual methods below make this test work\n pass", "def test_VoltageSourcePolarity(self):\n V=VoltageSource.VoltageSource(5,name=\"SRC\")\n\n gnd=AbsoluteVoltage.AbsoluteVoltage(0,name=\"gnd\")\n probe=VoltageProbe.VoltageProbe(name=\"probe\")\n\n V.connect(gnd,\"positive\")\n V.connect(probe,\"negative\")\n\n self.assertEqual( probe.getVoltageResistance(), (-5.0,0.0) )", "def runTest(self):\n self.setUp()\n self.test_SegmentDicom1()", "def test_4():", "def inner_test(param: models.Game):\n pass", "def runRobot():", "def test_if(self):", "def test_get_solution(self):\n pass", "def runTest(self):\r\n self.setUp()\r\n self.test_SegmentEditor1()", "def setUpClass(cls):\n cls.use_temp_region()\n cls.runModule(\"g.region\", raster=\"elev_state_500m\")", "def test_required_methods(self):", "def test_get_run(self):\n pass", "def test_T2():", 
"def test_T2():", "def test_drive(self):\n global ENV, TRAFFIC_LIGHT\n ENV = simpy.Environment()\n TRAFFIC_LIGHT = TrafficLight()\n bus = Bus(nr=0)\n ENV.process(bus.drive())\n ENV.run()\n self.assertEqual(bus.movement.to_pos, 600)", "def RUN(self):", "def test_subsystems(self):\n pass", "def main():\n run_test_draw_upside_down_wall()", "def local_test():\n pass", "def test(): # TO BE DELETED WHEN PROGRAM COMPLETED\n print('methode test')", "def setUp(self):\n\n self.veh = Vehicle(0, 0)\n self.R = Random(seed)", "def run_tests(self):\n raise NotImplementedError", "def test_testing():\n Pendulum = pu.Pendulum()\n ans = Pendulum.dummytest()\n assert ans", "def test_for_client():" ]
[ "0.6968613", "0.6791529", "0.6691997", "0.6609146", "0.6609146", "0.6609146", "0.6609146", "0.6609146", "0.6608754", "0.6608754", "0.6608754", "0.65086514", "0.6462126", "0.6446393", "0.6410319", "0.63936573", "0.6387084", "0.6344221", "0.63053054", "0.6279565", "0.6279565", "0.62661207", "0.62620705", "0.62610716", "0.6255007", "0.6248341", "0.624262", "0.6231664", "0.62218285", "0.6175254", "0.61490244", "0.6147542", "0.61460215", "0.61460215", "0.6132942", "0.6123282", "0.6117202", "0.6111127", "0.6111127", "0.6100391", "0.60926324", "0.6091974", "0.6091974", "0.6079245", "0.6046718", "0.6042331", "0.6023174", "0.60053986", "0.60053986", "0.60053986", "0.599872", "0.5990738", "0.59889513", "0.5987917", "0.5977351", "0.59767455", "0.5968978", "0.59551936", "0.5949117", "0.594131", "0.59358627", "0.5925304", "0.5915871", "0.5913733", "0.59133863", "0.5894879", "0.5867131", "0.5863864", "0.58602667", "0.58591497", "0.5855502", "0.58503556", "0.5844037", "0.583868", "0.5838598", "0.58229464", "0.5821654", "0.5819868", "0.5812618", "0.5808554", "0.58068883", "0.5797749", "0.57968473", "0.57924294", "0.5791044", "0.5790285", "0.57814246", "0.57752377", "0.5775053", "0.57713413", "0.57713413", "0.57681423", "0.5764771", "0.5760768", "0.5760427", "0.5758573", "0.57571536", "0.57562613", "0.5755301", "0.57462555", "0.57458544" ]
0.0
-1
Run the Ensembl variant effect predictor (VEP) alone with custom options. See option details at
def run_vep_annotator(vep_data: str, vcf_path: str, out_path: str, fasta: str, vep_custom: Union[str,list]=None, overwrite: bool=False, vep_n_fork: int=4):
    vep_path = os.path.normpath(os.path.join(__file__, "../../tools/ensembl-vep/vep"))

    need_run = True
    if os.path.exists(out_path) and not overwrite:
        need_run = False

    if need_run:
        print("STATUS: RUNNING VEP")

        if os.path.exists(out_path):
            os.remove(out_path)
            print("removed existing file: %s" % out_path)

        cmd = """%s \
        --dir %s \
        --af \
        --af_gnomad \
        --af_esp \
        --clin_sig_allele 0 \
        --max_af \
        --af_1k \
        --no_progress \
        --no_stats \
        --appris \
        --biotype \
        --buffer_size 500 \
        --canonical \
        --ccds \
        --check_existing \
        --distance 5000 \
        --hgvs \
        --fork %s \
        --numbers \
        --mane \
        --pick \
        --polyphen b \
        --protein \
        --pubmed \
        --regulatory \
        --sift b \
        --species homo_sapiens \
        --symbol \
        --transcript_version \
        --tsl \
        --uniprot \
        --input_file %s \
        --output_file %s \
        --fasta %s \
        --cache \
        --offline """ % (vep_path, vep_data, vep_n_fork, vcf_path, out_path, fasta)

        if vep_custom is not None:
            if type(vep_custom) == list:
                for v_custom in vep_custom:
                    cmd += "--custom %s " % v_custom
            elif type(vep_custom) == str:
                cmd += "--custom %s " % vep_custom
            else:
                raise ValueError("vep_custom should be of type list or str")

        os.system(cmd)
    else:
        print("output file %s already exists and overwrite is set to False" % out_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main() -> None:\n parser = ArgumentParser(description=\"SoccerPredictor:\", formatter_class=ArgumentDefaultsHelpFormatter)\n subparsers = parser.add_subparsers(title=\"Modes to run\", dest=\"command\")\n\n # Trainer args\n trainer_parser = subparsers.add_parser(RunMode.Train.value, help=\"Trains model and makes predictions.\",\n formatter_class=ArgumentDefaultsHelpFormatter)\n trainer_parser.add_argument(\"--resume\", action=\"store_true\", default=False,\n help=\"Resumes training of previously saved model. \"\n \"Tries to load the latest model saved if no name or prefix specified via --name.\")\n trainer_parser.add_argument(\"--epochs\", type=int, action=\"store\", default=1,\n help=\"Number of epochs to train model for.\")\n trainer_parser.add_argument(\"--ntest\", type=int, action=\"store\", default=10,\n help=\"Number of last samples used for testing for each team.\")\n trainer_parser.add_argument(\"--ndiscard\", type=int, action=\"store\", default=0,\n help=\"Number of last samples to discard for each team.\")\n trainer_parser.add_argument(\"--timesteps\", type=int, action=\"store\", default=40,\n help=\"Number of timesteps to use as data window size for input to network.\")\n trainer_parser.add_argument(\"--predict\", action=\"store_true\", default=False,\n help=\"Whether to rerun predictions without any training.\")\n trainer_parser.add_argument(\"--lrpatience\", type=int, action=\"store\", default=20,\n help=\"How many epochs to tolerate before decaying learning rate if no improvement. \"\n \"Turned off if 0.\")\n trainer_parser.add_argument(\"--lrdecay\", type=float, action=\"store\", default=0.95,\n help=\"How much to decay learning rate after patience exceeded.\")\n trainer_parser.add_argument(\"--seed\", type=int, action=\"store\",\n help=\"Specifies seed for rng.\")\n trainer_parser.add_argument(\"--savefreq\", type=int, action=\"store\", default=50,\n help=\"How often (number of epochs) to save models. No intermediate saving if 0.\")\n trainer_parser.add_argument(\"--printfreq\", type=int, action=\"store\", default=10,\n help=\"How often (number of epochs) to print current summaries. \"\n \"No intermediate printing if 0.\")\n trainer_parser.add_argument(\"--verbose\", type=int, action=\"store\", choices=VERBOSITY_LEVELS, default=1,\n help=\"Level of verbosity.\")\n\n # Visualizer args\n visualizer_parser = subparsers.add_parser(RunMode.Vis.value, help=\"Runs visualization of predictions.\",\n formatter_class=ArgumentDefaultsHelpFormatter)\n visualizer_parser.add_argument(\"--port\", type=int, action=\"store\", default=8050,\n help=\"Custom port for Dash visualization.\")\n visualizer_parser.add_argument(\"--host\", type=str, action=\"store\", default=\"127.0.0.1\",\n help=\"Custom host for Dash visualization. Can use 0 for 0.0.0.0 shortcut.\")\n\n # Backtester args\n backtester_parser = subparsers.add_parser(RunMode.Backtest.value, help=\"Runs backtesting on trained models.\",\n formatter_class=ArgumentDefaultsHelpFormatter)\n backtester_parser.add_argument(\"--path\", type=str, action=\"store\", default=f\"{DATA_DIR}{MODEL_DIR}\",\n help=\"Path to folder where the trained models are saved.\")\n\n # common args for trainer and visualizer\n for p in [trainer_parser, visualizer_parser]:\n p.add_argument(\"--name\", type=str, action=\"store\",\n help=\"Tries to load the latest saved model with given name prefix. 
\"\n \"Loads exact model if exact dir name specified.\")\n\n # Common args for visualizer and backtester\n for p in [visualizer_parser, backtester_parser]:\n p.add_argument(\"--ignoreodds\", type=float, action=\"store\", default=1.10,\n help=\"Ignores odds less than given amount when predicting which team to bet on.\")\n\n args = parser.parse_args()\n\n if args.command is None:\n parser.print_help()\n return\n elif args.command == RunMode.Vis.value:\n print(\"Visualizing...\")\n vis_args, _ = visualizer_parser.parse_known_args()\n check_visualizer_args(visualizer_parser, vis_args)\n print(vis_args)\n\n from soccerpredictor.visualizer import visualizer\n try:\n visualizer.run(vis_args.name, vis_args.host, vis_args.port, vis_args.ignoreodds)\n except KeyboardInterrupt:\n print(\"> Received CTRL+C command. Exiting.\")\n elif args.command == RunMode.Backtest.value:\n print(\"Backtesting...\")\n backtest_args, _ = backtester_parser.parse_known_args()\n check_backtester_args(backtester_parser, backtest_args)\n print(backtest_args)\n\n from soccerpredictor.backtester import backtester\n try:\n backtester.run(backtest_args.path, backtest_args.ignoreodds)\n except KeyboardInterrupt:\n print(\"> Received CTRL+C command. Exiting.\")\n elif args.command == RunMode.Train.value:\n print(\"Running model...\")\n train_args, _ = trainer_parser.parse_known_args()\n config = SPConfig()\n\n # Implicitly set resume to true if we are predicting only\n if train_args.predict and not train_args.resume:\n train_args.resume = True\n\n check_trainer_args(trainer_parser, train_args)\n config.set_args(train_args)\n print(train_args)\n\n dbmanager = SPDBManager()\n try:\n dbmanager.connect()\n\n # Load previous settings if we resume training\n if train_args.resume:\n print(\"Resuming training, loading previous settings... \"\n \"Any conflicting parameters will be ignored.\")\n\n # Load previous settings\n folder = get_latest_models_dir(train_args.name)\n model_settings = get_model_settings_file(folder)\n # Restore original config\n config.restore_args(model_settings)\n set_rng_seed(config.seed)\n\n from soccerpredictor.trainer.trainer import SPTrainer\n trainer = SPTrainer(dbmanager, model_settings=model_settings, folder=folder)\n else:\n # Need to generate folder prefix before seeding random number generators\n import random\n generated_folder_prefix = \"\".join(random.choices(ascii_uppercase, k=FOLDER_PREFIX_LEN))\n print(f\"New generated folder prefix: '{generated_folder_prefix}'\")\n set_rng_seed(train_args.seed)\n\n from soccerpredictor.trainer.trainer import SPTrainer\n trainer = SPTrainer(dbmanager, generated_folder_prefix=generated_folder_prefix)\n\n try:\n trainer.run()\n finally:\n trainer.cleanup()\n\n except KeyboardInterrupt:\n print(\"> Received CTRL+C command. 
Exiting.\")\n except (FileNotFoundError, ValueError, OperationalError) as e:\n print(e)\n sys.exit(1)\n finally:\n dbmanager.disconnect()", "def cli(sys_argv: List[str]):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--model_definition', type=str,\n help='Path to json model definition')\n\n parser.add_argument('--model_state_path', type=str,\n help='Path where to the trained parameters')\n\n parser.add_argument('--data_path', type=str, default=TEST_PATH,\n help='path to the pickled dataframe on which prediction should be made')\n\n parser.add_argument('--numerical_preprocessor', type=str, default=NUMERICAL_PREPROCESSOR_SAVE_PATH,\n help='Path of the saved numerical preprocessor')\n\n parser.add_argument('--categorical_preprocessor', type=str, default=CATEGORICAL_PREPROCESSOR_SAVE_PATH,\n help='Path to the saved categorical preprocessor')\n\n parser.add_argument('--output_directory', type=str, default=RESULTS_DIR,\n help='Path where to save the prediction of the experiment')\n\n args = parser.parse_args(sys_argv)\n\n # # ---------- parse config file ---------- # #\n config: dict = json.load(open(args.model_definition, 'r'))\n\n model_class: str = config['model_class']\n model_name: str = config['model_name']\n numerical_input_features: List[str] = config['data']['numerical_input_features']\n categorical_input_features: List[str] = config['data']['categorical_input_features']\n output_features: List[str] = config['data']['output_features']\n batch_size_test: int = config['data']['batch_size_test']\n\n device = torch.device(CUDA if torch.cuda.is_available() else CPU)\n\n # # ---------- parse model state ---------- # #\n model_state = load_model_state(args.model_state_path, device)\n\n model_hyperparameters: dict = model_state['hyperparameters']\n model_hyperparameters.update(config['model'])\n model_hyperparameters['device']: torch.device = device\n model_weights: dict = model_state['best_model_state_dict']\n\n # # ---------- initialize model ---------- # #\n model = REGISTERED_MODELS[model_class](**model_hyperparameters).to(device)\n model.load(model_weights)\n\n # # ---------- preprocess data for inference ---------- # #\n test_loader = preprocess_for_inference(\n args.data_path,\n numerical_input_features,\n categorical_input_features,\n output_features,\n args.numerical_preprocessor,\n args.categorical_preprocessor,\n batch_size_test=batch_size_test\n )\n\n # # ---------- compute and save predictions ---------- # #\n predictions = model.predict(test_loader)\n\n # save predictions\n data_file_name = os.path.basename(args.data_path)\n data_file_name = os.path.splitext(data_file_name)[0] # remove extension\n model_path = '{}/predictions_{}_{}.pickle'.format(args.output_directory, model_name, data_file_name)\n print(' [predict] Saving predictions at: `{}`'.format(model_path))\n file_utils.save_to_pickle(\n predictions,\n path=model_path\n )\n print(' [predict] Done')", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--identifier\", required=True,\n help=\"A short name/identifier for your experiment, e.g. 
'ex42b'.\")\n args = parser.parse_args()\n\n train(args)", "def main():\n args = parse_args(sys.argv[1:])\n\n if args.version:\n print(birdvoxclassify.version.version)\n return\n\n if args.quiet:\n logger_level = 30\n elif args.verbose:\n logger_level = 20\n else:\n logger_level = 25\n\n run(args.inputs,\n output_dir=args.output_dir,\n output_summary_path=args.output_summary_path,\n model_name=args.model_name,\n batch_size=args.batch_size,\n select_best_candidates=args.select_best_candidates,\n hierarchical_consistency=args.hierarchical_consistency,\n suffix=args.suffix,\n logger_level=logger_level)", "def main():\n config = handle_args()\n print(config)\n\n if config['mode'] == 'train':\n logger.info('Train mode')\n trainer = Trainer(config, shared_theano_params=None, **config)\n trainer.train(**config)\n logger.warn('Exiting train mode')\n else:\n logger.info('Test mode')\n translator = Translator(**config)\n translator.load_from_disk(config['models'], config['configs'],\n config['src_dicts'], config['trg_dict'])\n translator.translate_and_save(**config)", "def main():\n parser = argparse.ArgumentParser(description='Paperboy deep learning launcher')\n\n parser.add_argument('config', metavar='FILENAME', help='Configuration file for the run')\n parser.add_argument('command', metavar='COMMAND', help='A command to run')\n parser.add_argument('varargs', nargs='*', metavar='VARARGS', help='Extra options to the command')\n parser.add_argument('-r', '--run_number', type=int, default=0, help=\"A run number\")\n parser.add_argument('-d', '--device', default='cuda', help=\"A device to run the model on\")\n parser.add_argument('-s', '--seed', type=int, default=None, help=\"Random seed for the project\")\n parser.add_argument(\n '-p', '--param', type=str, metavar='NAME=VALUE', action='append', default=[],\n help=\"Configuration parameters\"\n )\n parser.add_argument(\n '--continue', action='store_true', default=False, help=\"Continue previously started learning process\"\n )\n parser.add_argument(\n '--profile', type=str, default=None, help=\"Profiler output\"\n )\n\n args = parser.parse_args()\n\n model_config = ModelConfig.from_file(\n args.config, args.run_number, continue_training=getattr(args, 'continue'), device=args.device, seed=args.seed,\n params={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)}\n )\n\n if model_config.project_dir not in sys.path:\n sys.path.append(model_config.project_dir)\n\n multiprocessing_setting = model_config.provide_with_default('multiprocessing', default=None)\n\n if multiprocessing_setting:\n # This needs to be called before any of PyTorch module is imported\n multiprocessing.set_start_method(multiprocessing_setting)\n\n # Set seed already in the launcher\n from vel.util.random import set_seed\n set_seed(model_config.seed)\n\n model_config.banner(args.command)\n\n if args.profile:\n print(\"[PROFILER] Running Vel in profiling mode, output filename={}\".format(args.profile))\n import cProfile\n import pstats\n profiler = cProfile.Profile()\n profiler.enable()\n model_config.run_command(args.command, args.varargs)\n profiler.disable()\n\n profiler.dump_stats(args.profile)\n profiler.print_stats(sort='tottime')\n\n print(\"======================================================================\")\n pstats.Stats(profiler).strip_dirs().sort_stats('tottime').print_stats(30)\n print(\"======================================================================\")\n pstats.Stats(profiler).strip_dirs().sort_stats('cumtime').print_stats(30)\n else:\n 
model_config.run_command(args.command, args.varargs)\n\n model_config.quit_banner()", "def predict(options):\n\n # Start marker for time measure\n start = time.time()\n\n print(\"\\n\\t\\t------------------------------------------------------------------------------------------------------\\n\")\n print(\"\\t\\tStarting Drug Interactions ANAlysis (DIANA), a program created by @OLIVA'S LAB. Third part: Prediction\\n\")\n print(\"\\t\\t------------------------------------------------------------------------------------------------------\\n\")\n\n # Get the script path\n main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\n # Read the config file\n config_file = os.path.join(main_path, 'config.ini')\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n\n # Check that the other_data directory exists\n other_data_dir = os.path.join(main_path, 'workspace/other_data')\n check_directory(other_data_dir)\n\n # Table with the results of DIANA\n data_frame_file = os.path.join(other_data_dir ,'all_results_3targets.csv')\n\n # Load data frame\n df = pd.read_csv(data_frame_file, index_col=0)\n\n dc_data = df[df['Comb'] == 1]\n num_dc = len(dc_data.index)\n print('Number of drug combinations with at least 3 targets: {}\\n'.format(num_dc))\n\n # Replace the None values in Struc by nan\n df = df.replace(to_replace={'Struc':{'None':np.nan}})\n # Replace the NA values in Struc by nan\n df = df.replace(to_replace={'Struc':{'NA':np.nan}})\n\n # Deleting the spearman for seeds. It is useless\n df = df.drop('SNsp', axis=1)\n df = df.drop('SPsp', axis=1)\n\n\n\n # End marker for time\n end = time.time()\n print('\\n DIANA INFO:\\tTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\\n'.format(end - start, (end - start) / 60))\n\n return", "def main():\n\tparser = argparse.ArgumentParser(\n\t\tusage = '%(prog)s [OPTIONS] [ARGS...]',\n\t\tdescription='Calculate something',\n\t\tepilog='Contact simon.clematide@uzh.ch'\n\t\t)\n\tparser.add_argument('--version', action='version', version='0.99')\n\tparser.add_argument('-l', '--logfile', dest='logfile',\n\t\t\t\t\t\thelp='write log to FILE', metavar='FILE')\n\tparser.add_argument('-q', '--quiet',\n\t\t\t\t\t\taction='store_true', dest='quiet', default=False,\n\t\t\t\t\t\thelp='do not print status messages to stderr')\n\tparser.add_argument('-d', '--debug',\n\t\t\t\t\t\taction='store_true', dest='debug', default=False,\n\t\t\t\t\t\thelp='print debug information')\n\tparser.add_argument('-s', '--lm_dir',\n\t\t\t\t\t\taction='store', dest='lm_dir', default='resources.d/taggers/language-model/',\n\t\t\t\t\t\thelp='directory where LMs are stored %(default)')\n\tparser.add_argument('-i', '--iob_dir',\n\t\t\t\t\t\taction='store', dest='iob_dir', default='data.d/quaero/quaero_iob',\n\t\t\t\t\t\thelp='directory where iob training material is located %(default)')\n\tparser.add_argument('-t', '--tagger_dir',\n\t\t\t\t\t\taction='store', dest='tagger_dir', default='resources.d/taggers',\n\t\t\t\t\t\thelp='directory where to store training output %(default)')\n\tparser.add_argument('-n', '--ner_cycle',\n\t\t\t\t\t\taction='store', dest='ner_cycle', default='ner',\n\t\t\t\t\t\thelp='ner experiment cycle %(default)')\n\tparser.add_argument('-c', '--correction_mode',\n\t\t\t\t\t\taction='store', dest='correction_mode', default='raw',\n\t\t\t\t\t\thelp='correction mode of the NEs in training data %(default)')\n\tparser.add_argument('-m', '--lm_domain',\n\t\t\t\t\t\taction='store', dest='lm_domain', default='pressfr',\n\t\t\t\t\t\thelp='character 
level language model domain %(default)')\n\tparser.add_argument('-p', '--train_patience',\n\t\t\t\t\t\taction='store', dest='train_patience', type=int, default=3,\n\t\t\t\t\t\thelp='training patience %(default)')\n\tparser.add_argument('-W', '--use_wiki_wordemb',\n\t\t\t\t\t\taction='store_true', dest='use_wiki_wordemb', default=False,\n\t\t\t\t\t\thelp='use pre-trained wiki word embeddings')\n\tparser.add_argument('-P', '--use_press_wordemb',\n\t\t\t\t\t\taction='store_true', dest='use_press_wordemb', default=False,\n\t\t\t\t\t\thelp='use indomain press word embeddings')\n\tparser.add_argument('-C', '--use_crf',\n\t\t\t\t\t\taction='store_true', dest='use_crf', default=False,\n\t\t\t\t\t\thelp='use CRF layer')\n\tparser.add_argument('args', nargs='*')\n\toptions = parser.parse_args()\n\tif options.logfile:\n\t\tlogging.basicConfig(filename=logfile)\n\tif options.debug:\n\t\tlogging.basicConfig(level=logging.DEBUG)\n\n\ttrain_tagger(options)", "def main():\n\n ############################ variable settings #################################\n parser = argparse.ArgumentParser(description='Run Subtask C of GermEval 2017 Using Pre-Trained Language Model.')\n parser.add_argument('--seed', type=int, default=42, help='Random seed.')\n parser.add_argument('--lang_model', type=str, default='bert-base-german-dbmdz-uncased', help='The pre-trained language model.')\n parser.add_argument('--epochs', type=int, default=4, help='Number of epochs for training.')\n parser.add_argument('--lr', type=float, default=5e-5, help='The learning rate.')\n parser.add_argument('--max_len', type=int, default=256, help='The maximum sequence length of the input text.')\n parser.add_argument('--batch_size', type=int, default=32, help='Your train set batch size.')\n parser.add_argument('--df_path', type=str, default='./data/', help='The data directory.') \n parser.add_argument('--train_data', type=str, default='train_df_cat.tsv', help='The filename of the input train data.')\n parser.add_argument('--dev_data', type=str, default='dev_df_cat.tsv', help='The filename of the input development data.')\n parser.add_argument('--test_data1', type=str, default='test_syn_df_cat.tsv', help='The filename of the first input test data (synchronic).')\n parser.add_argument('--test_data2', type=str, default='test_dia_df_cat.tsv', help='The filename of the second input test data (diachronic).')\n parser.add_argument('--output_path', type=str, default='./output/subtaskC/', help='The output directory of the model and predictions.')\n parser.add_argument(\"--train\", default=True, action=\"store_true\", help=\"Flag for training.\")\n parser.add_argument(\"--save_prediction\", default=False, action=\"store_true\", help=\"Flag for saving predictions.\")\n parser.add_argument(\"--save_cr\", default=False, action=\"store_true\", help=\"Flag for saving confusion matrix.\")\n parser.add_argument(\"--exclude_general\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein.\")\n parser.add_argument(\"--exclude_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding neutral polarity.\")\n parser.add_argument(\"--exclude_general_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein:neutral.\")\n args = parser.parse_args()\n ################################################################################\n set_all_seeds(args.seed)\n device, n_gpu = initialize_device_settings(use_cuda=True)\n \n # Load data\n train_df = pd.read_csv(args.df_path + args.train_data, 
delimiter = '\\t')\n dev_df = pd.read_csv(args.df_path + args.dev_data, delimiter = '\\t')\n test_syn_df = pd.read_csv(args.df_path + args.test_data1, delimiter = '\\t')\n test_dia_df = pd.read_csv(args.df_path + args.test_data2, delimiter = '\\t')\n \n # Create a tokenizer\n lower_case = False\n if args.lang_model[-7:] == \"uncased\":\n lower_case = True\n\n if args.lang_model[:4] == \"bert\":\n model_class = \"BERT\"\n tokenizer = BertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n if args.lang_model[:10] == \"distilbert\":\n model_class = \"DistilBERT\"\n tokenizer = DistilBertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n\n # get training features\n cats = train_df.columns[5:]\n end = \"full\"\n # exclude categories if required\n if (args.exclude_general):\n cats = [i for i in list(cats) if \"Allgemein\" not in i]\n end = \"excl_gen\"\n if (args.exclude_neutral):\n cats = [i for i in list(cats) if \"neutral\" not in i]\n end = \"excl_neu\"\n if (args.exclude_general_neutral):\n cats = [i for i in list(cats) if \"Allgemein:neutral\" not in i]\n end = \"excl_genneu\"\n \n num_labels = len(list(cats))\n\n # create one hot labels\n train_df['one_hot_labels'] = list(train_df[list(cats)].values)\n dev_df['one_hot_labels'] = list(dev_df[list(cats)].values)\n test_syn_df['one_hot_labels'] = list(test_syn_df[list(cats)].values)\n test_dia_df['one_hot_labels'] = list(test_dia_df[list(cats)].values)\n\n # retrieve sentences and labels\n df = pd.concat([train_df, dev_df])\n sentences = df.text.values\n labels = list(df.one_hot_labels.values) \n\n sentences_syn = test_syn_df.text.values\n labels_syn = list(test_syn_df.one_hot_labels.values)\n\n sentences_dia = test_dia_df.text.values\n labels_dia = list(test_dia_df.one_hot_labels.values)\n \n print(\"number of categories:\", len(list(cats)))\n\n # Tokenize all of the sentences and map the tokens to their word IDs. 
\n input_ids = [tokenizer.encode(sent, add_special_tokens=True, truncation=True, \n max_length=args.max_len) for sent in sentences]\n input_ids = pad_sequences(input_ids, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n # Create attention masks\n attention_masks = [[int(token_id > 0) for token_id in sent] for sent in input_ids]\n \n # synchronic test data\n input_ids_syn = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_syn]\n input_ids_syn = pad_sequences(input_ids_syn, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_syn = [[int(token_id > 0) for token_id in sent] for sent in input_ids_syn]\n \n # diachronic test data\n input_ids_dia = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_dia]\n input_ids_dia = pad_sequences(input_ids_dia, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_dia = [[int(token_id > 0) for token_id in sent] for sent in input_ids_dia]\n\n # split train, dev\n train_inputs, train_labels, dev_inputs, dev_labels, train_masks, dev_masks = split_train_dev(\n train_df, dev_df, attention_masks, input_ids, labels)\n \n # transform to torch tensor\n train_inputs = torch.tensor(train_inputs)\n dev_inputs = torch.tensor(dev_inputs)\n\n train_labels = torch.tensor(train_labels)\n dev_labels = torch.tensor(dev_labels)\n\n train_masks = torch.tensor(train_masks)\n dev_masks = torch.tensor(dev_masks)\n\n test_syn_inputs = torch.tensor(input_ids_syn)\n test_syn_masks = torch.tensor(attention_masks_syn)\n test_syn_labels = torch.tensor(labels_syn)\n\n test_dia_inputs = torch.tensor(input_ids_dia)\n test_dia_masks = torch.tensor(attention_masks_dia)\n test_dia_labels = torch.tensor(labels_dia)\n\n # Create the DataLoader\n train_dataloader = create_dataloader(train_inputs, train_masks, \n train_labels, args.batch_size, train = True)\n\n dev_dataloader = create_dataloader(dev_inputs, dev_masks, \n dev_labels, args.batch_size, train = False)\n\n test_syn_dataloader = create_dataloader(test_syn_inputs, test_syn_masks, \n test_syn_labels, args.batch_size, \n train = False)\n\n test_dia_dataloader = create_dataloader(test_dia_inputs, test_dia_masks, \n test_dia_labels, args.batch_size, \n train = False)\n\n # Create model\n if args.train:\n if model_class == \"BERT\":\n config = BertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = BertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n\n if model_class == \"DistilBERT\":\n config = DistilBertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = DistilBertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n model.cuda()\n\n\n # Create an optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.lr,\n eps = 1e-8\n )\n # Total 
number of training steps = number of batches * number of epochs\n total_steps = len(train_dataloader) * args.epochs\n # Create the learning rate scheduler\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n )\n \n # train model\n # Main Loop\n print(\"=================== Train ================\")\n print(\"##### Language Model:\", args.lang_model, \",\", \"learning rate:\", args.lr)\n print()\n\n track_time = time.time()\n # trange is a tqdm wrapper around the normal python range\n for epoch in trange(args.epochs, desc=\"Epoch\"):\n print(\"Epoch: %4i\"%epoch, dt.datetime.now())\n\n model, optimizer, scheduler, tr_loss = train_multilabel(\n train_dataloader=train_dataloader, \n model=model, \n device=device, \n optimizer=optimizer, \n scheduler=scheduler, \n num_labels=num_labels\n )\n # EVALUATION: TRAIN SET\n pred_bools_train, true_bools_train, f1_train = eval_multilabel(\n train_dataloader, model=model, device=device)\n print(\"TRAIN: micro F1 %.3f\"%(f1_train))\n \n # EVALUATION: DEV SET\n pred_bools_dev, true_bools_dev, f1_dev = eval_multilabel(\n dev_dataloader, model=model, device=device)\n print(\"EVAL: micro F1 %.3f\"%(f1_dev))\n \n\n print(\" Training and validation took in total: {:}\".format(format_time(time.time()-track_time)))\n\n # EVALUATION: TEST SYN SET\n pred_bools_syn, true_bools_syn, f1_test_syn = eval_multilabel(\n test_syn_dataloader, model=model, device=device)\n print(\"TEST SYN: micro F1 %.4f\"%(f1_test_syn))\n\n # classification report\n clf_report_syn = classification_report(true_bools_syn, pred_bools_syn, target_names=cats, digits=3)\n print(clf_report_syn)\n\n\n # EVALUATION: TEST DIA SET\n pred_bools_dia, true_bools_dia, f1_test_dia = eval_multilabel(\n test_dia_dataloader, model=model, device=device\n )\n print(\"TEST DIA: micro F1 %.4f\"%(f1_test_dia))\n\n # classification report\n clf_report_dia = classification_report(true_bools_dia, pred_bools_dia, target_names=cats, digits=3)\n print(clf_report_dia)\n \n if args.save_cr:\n pickle.dump(clf_report_syn, open(args.output_path+'clf_report_'+args.lang_model+'_test_syn_'+str(num_labels)+end+'.txt','wb'))\n pickle.dump(clf_report_dia, open(args.output_path+'clf_report_'+args.lang_model+'_test_dia_'+str(num_labels)+end+'.txt','wb'))\n\n\n if args.save_prediction:\n test_syn_df[\"category_pred\"] = pred_bools_syn\n test_dia_df[\"category_pred\"] = pred_bools_dia\n test_syn_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_syn_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")\n test_dia_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_dia_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")", "def main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--lr\", type=float, default=12e-3)\r\n parser.add_argument(\"--bs\", type=int, default=128)\r\n parser.add_argument('--fp16', action='store_true', help='Run model fp16 mode.')\r\n parser.add_argument('--not_distrib', action='store_true', help='Run model fp16 mode.')\r\n parser.add_argument('--loss_scale', type=float, default=1)\r\n parser.add_argument(\"--local_rank\", type=int)\r\n arg = parser.parse_args()\r\n torch.cuda.set_device(arg.local_rank)\r\n if not arg.not_distrib: torch.distributed.init_process_group('nccl', init_method='env://')\r\n main_train(arg.lr, arg.bs, arg.local_rank, arg.not_distrib, arg.fp16, arg.loss_scale)", "def main():\n parser 
= argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='mode')\n\n # Add sub-parser for feature extraction\n parser_extract = subparsers.add_parser('extract')\n parser_extract.add_argument('dataset',\n choices=['training', 'validation', 'test'],\n )\n\n # Add sub-parser for training\n subparsers.add_parser('train')\n\n # Add sub-parser for inference\n parser_predict = subparsers.add_parser('predict')\n parser_predict.add_argument('dataset',\n nargs='?',\n choices=['validation', 'test'],\n default='test',\n )\n\n # Add sub-parser for evaluation\n parser_evaluate = subparsers.add_parser('evaluate')\n parser_evaluate.add_argument('task',\n nargs='?',\n choices=['tagging', 'sed', 'all'],\n default='all',\n )\n parser_evaluate.add_argument('dataset',\n nargs='?',\n choices=['validation', 'test'],\n default='test',\n )\n parser_evaluate.add_argument('--thresholds', action='store_true')\n\n args = parser.parse_args()\n\n if args.mode == 'extract':\n extract(cfg.to_dataset(args.dataset))\n elif args.mode == 'train':\n train()\n elif args.mode == 'predict':\n predict(cfg.to_dataset(args.dataset))\n elif args.mode == 'evaluate':\n eval_all = args.task == 'all'\n dataset = cfg.to_dataset(args.dataset)\n if args.task == 'tagging' or eval_all:\n evaluate_audio_tagging(dataset, args.thresholds)\n if args.task == 'sed' or eval_all:\n evaluate_sed(dataset)", "def main():\n\n args = parse_cmd_line_args()\n\n random_state = check_random_state(args.random_seed)\n\n X, mu, A, phases = generate_data(args.n_features, n_samples=args.n_samples,\n period=args.period, order=args.order,\n noise_variance=args.noise_variance,\n random_state=random_state)\n\n if args.plot_data:\n plot_data_timeseries(X)\n\n best_fit, best_weights = fit_fembv_varx(\n X, n_components=args.n_components,\n max_tv_norm=args.max_tv_norm,\n memory=args.memory, n_init=args.n_init,\n tolerance=args.tolerance,\n max_iterations=args.max_iterations,\n verbose=args.verbose, random_state=random_state)\n\n if args.plot_weights:\n plot_weights_timeseries(best_weights, phases)", "def run_model (arguments):\n if arguments.train is not None:\n # Train a new model, optionally with a certain number of epochs\n predictor = None\n if len(arguments.train) > 0:\n predictor = train(n_epochs=arguments.train[0])\n else:\n predictor = train()\n # Afterwards save it\n now = datetime.now(timezone.utc)\n predictor.to_disk(fname=f\"model_parameters_{now.strftime('%Y%m%d%H%M%S')}\")\n elif arguments.export_embeddings:\n # Load the saved predictor ...\n predictor = Predictor.from_file()\n # ... 
and then dump the models to disk.\n predictor.subj.export_embeddings(\"subject\")\n predictor.obj.export_embeddings(\"object\")\n print(\"Models are saved to output directory for loading with http://projector.tensorflow.org/.\")\n elif arguments.console:\n # Opens a console for prediction without training\n predictor = Predictor.from_file()\n tinker(predictor)", "def main():\n parser = argparse.ArgumentParser()\n\n # Add arguments to parser\n parser.add_argument(\n '-base_data_dir', default='../data',\n help='Root directory of data', type=str)\n parser.add_argument(\n '-dataset', default='litbank', choices=['litbank', 'ontonotes'], type=str)\n parser.add_argument('-base_model_dir',\n default='../models',\n help='Root folder storing model runs', type=str)\n parser.add_argument('-model_size', default='large', type=str,\n help='BERT model type')\n parser.add_argument('-doc_enc', default='overlap', type=str,\n choices=['independent', 'overlap'], help='BERT model type')\n parser.add_argument('-pretrained_bert_dir', default='../resources', type=str,\n help='SpanBERT model location')\n parser.add_argument('-max_segment_len', default=512, type=int,\n help='Max segment length of BERT segments.')\n parser.add_argument('-top_span_ratio', default=0.3, type=float,\n help='Ratio of top spans proposed as mentions.')\n\n parser.add_argument('-ment_emb', default='endpoint', choices=['attn', 'max', 'endpoint'],\n type=str)\n parser.add_argument('-max_span_width',\n help='Max span width', default=20, type=int)\n parser.add_argument('-mlp_depth', default=1, type=int,\n help='Number of hidden layers in other MLPs')\n parser.add_argument('-mlp_size', default=3000, type=int,\n help='MLP size used in the model')\n\n parser.add_argument('-cross_val_split', default=0, type=int,\n help='Cross validation split to be used.')\n parser.add_argument('--batch_size', '-bsize',\n help='Batch size', default=1, type=int)\n parser.add_argument('-num_train_docs', default=None, type=int,\n help='Number of training docs.')\n parser.add_argument('-dropout_rate', default=0.3, type=float,\n help='Dropout rate')\n parser.add_argument('-max_epochs',\n help='Maximum number of epochs', default=25, type=int)\n parser.add_argument('-seed', default=0,\n help='Random seed to get different runs', type=int)\n parser.add_argument('-init_lr', help=\"Initial learning rate\",\n default=5e-4, type=float)\n parser.add_argument('-checkpoint', help=\"Use checkpoint\",\n default=False, action=\"store_true\")\n parser.add_argument('-eval', help=\"Evaluate model\",\n default=False, action=\"store_true\")\n parser.add_argument('-slurm_id', help=\"Slurm ID\",\n default=None, type=str)\n\n args = parser.parse_args()\n\n model_name = get_mention_model_name(args)\n print(model_name)\n\n model_dir = path.join(args.base_model_dir, model_name)\n args.model_dir = model_dir\n best_model_dir = path.join(model_dir, 'best_models')\n args.best_model_dir = best_model_dir\n if not path.exists(model_dir):\n os.makedirs(model_dir)\n if not path.exists(best_model_dir):\n os.makedirs(best_model_dir)\n\n if args.dataset == 'litbank':\n args.data_dir = path.join(args.base_data_dir, f'{args.dataset}/{args.doc_enc}/{args.cross_val_split}')\n else:\n args.data_dir = path.join(args.base_data_dir, f'{args.dataset}/{args.doc_enc}')\n\n # if args.dataset == 'ontonotes':\n # args.pretrained_model = path.join(\n # args.pretrained_mention_model_dir, f'mention_ontonotes_{args.model_size}_{args.ment_emb}.pt')\n # Log directory for Tensorflow Summary\n\n Experiment(**vars(args))", "def 
main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def main(self, options):\n raise NotImplementedError", "def main(_):\n hps = LM.get_default_hparams().parse(FLAGS.hpconfig)\n hps._set(\"num_gpus\", FLAGS.num_gpus)\n print ('*****HYPER PARAMETERS*****')\n print (hps)\n print ('**************************')\n\n vocab = Vocabulary.from_file(os.path.join(FLAGS.datadir, \"vocabulary.txt\"))\n\n if FLAGS.mode == \"train\":\n #hps.batch_size = 256\n dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"train.txt\"))\n run_train(dataset, hps, os.path.join(FLAGS.logdir, \"train\"), ps_device=\"/gpu:0\")\n elif FLAGS.mode.startswith(\"eval\"):\n data_dir = os.path.join(FLAGS.datadir, \"eval.txt\")\n #predict_model = prediction.Model('/dir/ckpt',os.path.join(FLAGS.datadir, \"vocabulary.txt\"), hps)\n\n dataset = Dataset(vocab, data_dir, deterministic=True)\n prefix_words = \"<brk>\".split()\n predict_model = predict.Model(hps, FLAGS.logdir, FLAGS.datadir)\n print ('start input')\n out = predict_model.predictnextkwords(prefix_words, FLAGS.num_sen)\n for row in out:\n print(' '.join(row) + \"\\n\")\n print(\"len_out: \" + str(len(out)))\n #prediction.topkwords(prefix_words, dataset, hps, FLAGS.logdir, FLAGS.mode)\n #sentence_ppl(prefix_words,dataset, hps, FLAGS.logdir, FLAGS.mode)\n #print vocab\n #dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"eval.txt\"))\n #run_eval(dataset, hps, FLAGS.logdir, FLAGS.mode, FLAGS.eval_steps)", "def main():\n argparser = ArgumentParser()\n argparser.add_argument('--case', type=int, required=True,\n help='case number to create observations e.g. 1 if 1.json')\n args = argparser.parse_args()\n\n case = args.case\n observation_file = os.path.join(OBSERVATION_DIR, '{}.json'.format(case))\n with open(observation_file, 'r') as f:\n observation_config = json.load(f)\n\n nodes = observation_config['nodes']\n edges = observation_config['edges']\n observations = observation_config['observations']\n\n # solution part\n parameters = _get_learned_parameters(nodes=nodes, edges=edges, observations=observations)\n # end solution part\n\n # json only recognises floats, not np.float, so we need to cast the values into floats.\n for node, node_params in parameters.items():\n for param, val in node_params.items():\n node_params[param] = float(val)\n parameters[node] = node_params\n\n if not os.path.exists(PREDICTION_DIR):\n os.makedirs(PREDICTION_DIR)\n prediction_file = os.path.join(PREDICTION_DIR, '{}.json'.format(case))\n\n with open(prediction_file, 'w') as f:\n json.dump(parameters, f, indent=1)\n print('INFO: Results for test case {} are stored in {}'.format(case, prediction_file))", "def use_args(args):\n global DATA_PATH\n global IMAGES_FILE\n global WORKING_DIR\n global OUTPUT_DIR\n global OUTPUT_FILE_NAME\n global OUTPUT_FILE\n global LOAD_INDEXES\n global INDEXES_DIR\n global MODEL\n global JOIN_MODELS\n global MODEL1\n global MODEL2\n global ALL_TOGETHER\n global TRAINED_MODELS\n global TRAINED_MODELS_DIR\n global TRAINED_MODELS_DIR2\n global TRAINED_MODELS_DIRS\n global CROSS_VALIDATION\n global TRAIN_EPOCHS\n global FEATURES\n \n if args.data_path:\n # Change the default path of the images\n DATA_PATH = args.data_path\n IMAGES_FILE = os.path.join(DATA_PATH, IMAGES_FILE_NAME)\n \n if args.working_dir:\n # Change the default path of the working directory\n WORKING_DIR = args.working_dir\n OUTPUT_DIR = WORKING_DIR\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.output_dir:\n # Change the default path of the 
output directory\n OUTPUT_DIR = os.path.join(WORKING_DIR, args.output_dir)\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.output:\n # Change the default name of the output file\n OUTPUT_FILE_NAME = args.output\n OUTPUT_FILE = os.path.join(OUTPUT_DIR, OUTPUT_FILE_NAME)\n \n if args.indexes_dir:\n # Load random and train indexes from file\n LOAD_INDEXES = True\n INDEXES_DIR = args.indexes_dir\n \n if args.model:\n # Select model\n MODEL = args.model\n \n if args.models:\n \n if not args.trained_models_dirs:\n raise Exception(\"Arg. `-M --models` requires arg. \"\n + \"`-T --trained_models_dirs`\")\n \n # Models to combine\n JOIN_MODELS = True\n MODEL1 = args.models[0]\n MODEL2 = args.models[1]\n \n if args.trained_models_dir:\n # Load trained models from file\n TRAINED_MODELS = True\n TRAINED_MODELS_DIR = args.trained_models_dir\n \n if args.trained_models_dirs:\n # Load trained models from file\n TRAINED_MODELS = True\n TRAINED_MODELS_DIR = args.trained_models_dirs[0]\n TRAINED_MODELS_DIR2 = args.trained_models_dirs[1]\n \n if args.all_together:\n # The four models together\n ALL_TOGETHER = True\n TRAINED_MODELS_DIRS = args.all_together\n \n if args.cross_validation:\n # Activate cross_validation\n CROSS_VALIDATION = True\n \n if args.train_epochs:\n # Change the default number of train epochs\n TRAIN_EPOCHS = args.train_epochs\n \n if args.features:\n # Nuber of best features to use\n FEATURES = args.features", "def main(args: argparse.Namespace, config: Config) -> None:\n # Notes:\n # - 1878 is the number of unique answers from the GQA paper\n # - 1843 is the number of answers across train, val and testdev\n\n # Download and initialise resources\n print(colored(\"initialisation:\", attrs=[\"bold\"]))\n stanza.download(lang=\"en\", dir=\".stanza\")\n\n # Print environment info\n print(colored(\"environment:\", attrs=[\"bold\"]))\n cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if cuda else \"cpu\")\n print(f\"device: {torch.cuda.get_device_name(device) if cuda else 'CPU'}\")\n print(config)\n\n if args.job == JobType.PREPROCESS:\n preprocess(config)\n elif args.job in (JobType.TRAIN, JobType.PREDICT):\n resume = None\n if args.resume != \"\":\n run_id, checkpoint = args.resume.split(\":\")\n resume = ResumeInfo(run_id, checkpoint)\n if args.job == JobType.TRAIN:\n train(config, device, resume)\n else:\n predict(config, device, resume)\n else:\n raise NotImplementedError()", "def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training initialize from model zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = 
os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "def main():\n subcommands = {\n \"train\": train.train,\n \"tune\": train_tune.train,\n \"predict\": predict.cli_predict,\n \"evaluate\": evaluate.cli_evaluate,\n \"version\": version,\n }\n\n try:\n import xarray_behave.gui.app\n\n subcommands[\"gui\"] = xarray_behave.gui.app.main_das\n except (ImportError, ModuleNotFoundError):\n logging.exception(\"No GUI avalaible.\")\n # fall back to function that displays helpful instructions\n subcommands[\"gui\"] = no_xb_gui\n\n logging.basicConfig(level=logging.INFO, force=True)\n defopt.run(subcommands, show_defaults=False)", "def prediction_subparser(subparsers):\n\n parser = subparsers.add_parser('predict',\n help='Runs AMFinder in prediction mode.',\n formatter_class=RawTextHelpFormatter)\n\n x = PAR['tile_edge']\n parser.add_argument('-t', '--tile_size',\n action='store', dest='edge', type=int, default=x,\n help='Tile size (in pixels) used for image segmentation.'\n '\\ndefault value: {} pixels'.format(x))\n\n parser.add_argument('-sr', '--super_resolution',\n action='store_const', dest='super_resolution', const=True,\n help='Apply super-resolution before predictions.'\n '\\ndefault value: no super-resolution.')\n\n x = 'SRGANGenv1beta.h5'\n parser.add_argument('-g', '--generator',\n action='store', dest='generator', metavar='H5', type=str, default=x,\n help='name of the pre-trained generator.'\n '\\ndefault value: {}'.format(x))\n\n x = PAR['colormap']\n parser.add_argument('-map', '--colormap',\n action='store', dest='colormap', metavar='id', type=str, default=x,\n help='Name of the colormap used to display conv2d outputs and kernels.'\n '\\ndefault value: {}'.format(x))\n\n x = 'CNN1v2.h5'\n parser.add_argument('-net', '--network',\n action='store', dest='model', metavar='H5', type=str, default=x,\n help='name of the pre-trained model to use for predictions.'\n '\\ndefault value: {}'.format(x))\n\n parser.add_argument('-so', '--save_conv2d_outputs',\n action='store_const', dest='save_conv2d_outputs', const=True,\n help='save conv2d outputs in a separate zip file.'\n '\\ndefault value: False')\n\n parser.add_argument('-sk', '--save_conv2d_kernels',\n action='store_const', dest='save_conv2d_kernels', const=True,\n help='save convolution kernels in a separate zip file (takes time).'\n '\\ndefault value: False')\n\n x = PAR['input_files']\n parser.add_argument('image', nargs='*', default=x,\n help='plant root scan to be processed.'\n '\\ndefault value: {}'.format(x))\n\n return parser", "def main():\n\n args = get_arguments()\n\n w, h = map(int, args.input_size.split(','))\n\n config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')\n with open(config_path, 'r') as stream:\n config = yaml.load(stream)\n\n args.model = config['model']\n print('ModelType:%s'%args.model)\n print('NormType:%s'%config['norm_style'])\n gpu0 = args.gpu\n batchsize = args.batchsize\n\n model_name = os.path.basename( os.path.dirname(args.restore_from) )\n #args.save += 
model_name\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n confidence_path = os.path.join(args.save, 'submit/confidence')\n label_path = os.path.join(args.save, 'submit/labelTrainIds')\n label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')\n for path in [confidence_path, label_path, label_invalid_path]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n if args.model == 'DeepLab':\n model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])\n elif args.model == 'Oracle':\n model = Res_Deeplab(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_ORC\n elif args.model == 'DeeplabVGG':\n model = DeeplabVGG(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_VGG\n\n if args.restore_from[:4] == 'http' :\n saved_state_dict = model_zoo.load_url(args.restore_from)\n else:\n saved_state_dict = torch.load(args.restore_from)\n\n try:\n model.load_state_dict(saved_state_dict)\n except:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(saved_state_dict)\n model.eval()\n model.cuda(gpu0)\n\n testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n scale = 1.25\n testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n\n if version.parse(torch.__version__) >= version.parse('0.4.0'):\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)\n else:\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear')\n\n sm = torch.nn.Softmax(dim = 1)\n log_sm = torch.nn.LogSoftmax(dim = 1)\n kl_distance = nn.KLDivLoss( reduction = 'none')\n prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]\n prior = torch.from_numpy(prior)\n for index, img_data in enumerate(zip(testloader, testloader2) ):\n batch, batch2 = img_data\n image, _, name = batch\n image2, _, name2 = batch2\n\n inputs = image.cuda()\n inputs2 = image2.cuda()\n print('\\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')\n if args.model == 'DeepLab':\n with torch.no_grad():\n output1, output2 = model(inputs)\n output_batch = interp(sm(0.5* output1 + output2))\n\n heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)\n\n output1, output2 = model(fliplr(inputs))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs\n\n output1, output2 = model(inputs2)\n output_batch += interp(sm(0.5* output1 + output2))\n output1, output2 = model(fliplr(inputs2))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs2\n ratio = 0.95\n output_batch = output_batch.cpu() / 4\n # output_batch = output_batch *(ratio + (1 - ratio) * prior)\n output_batch = output_batch.data.numpy()\n heatmap_batch = heatmap_batch.cpu().data.numpy()\n elif args.model == 'DeeplabVGG' or args.model == 'Oracle':\n output_batch = model(Variable(image).cuda())\n 
output_batch = interp(output_batch).cpu().data.numpy()\n\n output_batch = output_batch.transpose(0,2,3,1)\n score_batch = np.max(output_batch, axis=3)\n output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)\n\n threshold = 0.3274\n for i in range(output_batch.shape[0]):\n output_single = output_batch[i,:,:]\n output_col = colorize_mask(output_single)\n output = Image.fromarray(output_single)\n\n name_tmp = name[i].split('/')[-1]\n dir_name = name[i].split('/')[-2]\n save_path = args.save + '/' + dir_name\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n output.save('%s/%s' % (save_path, name_tmp))\n print('%s/%s' % (save_path, name_tmp))\n output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))\n\n # heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])\n # fig = plt.figure()\n # plt.axis('off')\n # heatmap = plt.imshow(heatmap_tmp, cmap='viridis')\n # fig.colorbar(heatmap)\n # fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))\n\n if args.set == 'test' or args.set == 'val':\n # label\n output.save('%s/%s' % (label_path, name_tmp))\n # label invalid\n output_single[score_batch[i, :, :] < threshold] = 255\n output = Image.fromarray(output_single)\n output.save('%s/%s' % (label_invalid_path, name_tmp))\n # conficence\n\n confidence = score_batch[i, :, :] * 65535\n confidence = np.asarray(confidence, dtype=np.uint16)\n print(confidence.min(), confidence.max())\n iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)\n\n return args.save", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__)\n parser.add_argument('--maxsteps', type=int, default=100000)\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--savefile', type=str, required=True)\n nproc = max(cpu_count() - 1, 1)\n parser.add_argument('--maxprocs', type=int, default=nproc)\n args = parser.parse_args()\n\n seed = args.seed\n np.random.seed(seed)\n venv = gen_vectorized_pong_env(args.maxprocs)\n policy = create_random_policy(venv)\n\n num_timesteps = 0\n paths = []\n while num_timesteps < args.maxsteps:\n print('{: 10d} of {: 10d} steps'.format(\n num_timesteps, args.maxsteps))\n new_paths = vsample(venv, policy)\n paths += new_paths\n num_timesteps += sum(len(path.obs) for path in new_paths)\n\n dataset = Dataset.from_paths(venv, paths)\n print('Generated', len(dataset.obs), 'timesteps total')\n dataset.save(args.savefile)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-v', '--verbose', action='store_true',\n help='enable verbose output (for debugging)')\n parser.add_argument(\n '-c', '--category_num', type=int,\n help='number of object categories (obsolete)')\n parser.add_argument(\n '-m', '--model', type=str, required=True,\n help=('[yolov3-tiny|yolov3|yolov3-spp|yolov4-tiny|yolov4|'\n 'yolov4-csp|yolov4x-mish|yolov4-p5]-[{dimension}], where '\n '{dimension} could be either a single number (e.g. '\n '288, 416, 608) or 2 numbers, WxH (e.g. 
416x256)'))\n parser.add_argument(\n '--int8', action='store_true',\n help='build INT8 TensorRT engine')\n parser.add_argument(\n '--dla_core', type=int, default=-1,\n help='id of DLA core for inference (0 ~ N-1)')\n args = parser.parse_args()\n\n engine = build_engine(\n args.model, args.int8, args.dla_core, args.verbose)\n if engine is None:\n raise SystemExit('ERROR: failed to build the TensorRT engine!')\n\n engine_path = '%s.trt' % args.model\n with open(engine_path, 'wb') as f:\n f.write(engine.serialize())\n print('Serialized the TensorRT engine to file: %s' % engine_path)", "def run_cmd(self, argvs: list):\n self.parser = argparse.ArgumentParser(description=\"Run the {} module.\".format(self.name),\n prog='hub run {}'.format(self.name),\n usage='%(prog)s',\n add_help=True)\n self.arg_input_group = self.parser.add_argument_group(title=\"Input options\", description=\"Input data. Required\")\n self.arg_config_group = self.parser.add_argument_group(\n title=\"Config options\", description=\"Run configuration for controlling module behavior, not required.\")\n self.add_module_config_arg()\n self.add_module_input_arg()\n args = self.parser.parse_args(argvs)\n\n results = self.predict(image_list=[args.input_path],\n save_path=args.output_dir,\n visualization=args.visualization)\n\n return results", "def main(_):\n if not FLAGS.model_output_dir:\n raise ValueError(\n \"Undefined model output directory. Perhaps you forgot to set the --model_output_dir flag?\")\n \n if FLAGS.predict_input_file:\n decode()\n else:\n train()", "def main():\r\n parser = get_parser()\r\n config = parser.parse_args(['--cfg', 'config.yaml'])\r\n result_filing.init_config_vars(config)\r\n run_id = config.info.run_id\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n\r\n operation = config.info.operation_type\r\n logger.info(\"Selected operation type %s.\"%(operation))\r\n if operation == const.TRAIN_OP:\r\n train.train_model(config)\r\n elif operation == const.DEPLOY_OP:\r\n test.test_model(config)", "def main():\n args = parameter_parser()\n tab_printer(args)\n trainer = GPNTrainer(args)\n # trainer.fit()\n \"\"\"\n Scoring on the prediction and learning ability.\n \"\"\"\n trainer.score()\n \"\"\"\n Scoring on the subgraph test set.\n \"\"\"\n # trainer.score2()\n \"\"\"\n Scoring on the generalization ability.\n \"\"\"\n # trainer.score3()\n \"\"\"\n Finetuning for downstream tasks.\n \"\"\"\n # model = finetune_GPN(args, trainer.number_of_labels)\n # model.finetune()", "def test_opt_presets(self):\n pp = ParlaiParser(True, False)\n pp.add_argument(\"-m\", \"--model\")\n # hardcoded example\n opt = pp.parse_args(['--model', 'transformer/generator', '-o', 'gen/meena'])\n assert opt['beam_size'] == 20\n assert opt['inference'] == 'topk'\n assert opt['topk'] == 40\n # and preference for command line over opt presets\n pp = ParlaiParser(True, False)\n pp.add_argument(\"-m\", \"--model\")\n opt = pp.parse_args(\n ['--model', 'transformer/generator', '-o', 'gen/meena', '--topk', '7']\n )\n assert opt['beam_size'] == 20\n assert opt['inference'] == 'topk'\n assert opt['topk'] == 7\n # double check ordering doesn't matter\n pp = ParlaiParser(True, False)\n pp.add_argument(\"-m\", \"--model\")\n opt = pp.parse_args(\n ['--model', 'transformer/generator', '--topk', '8', '-o', 'gen/meena']\n )\n assert opt['beam_size'] == 20\n assert opt['inference'] == 'topk'\n assert opt['topk'] == 8\n # check composability\n pp = ParlaiParser(True, False)\n pp.add_argument(\"-m\", \"--model\")\n opt = pp.parse_args(['-o', 
'arch/blenderbot_3B,gen/meena'])\n assert opt['beam_size'] == 20\n assert opt['inference'] == 'topk'\n assert opt['model'] == 'transformer/generator'\n assert opt['n_encoder_layers'] == 2", "def main():\n\n argparser = ArgumentParser()\n argparser.add_argument('--datapath', '-D', type=str, help='Relative path to cwd of a local data file')\n argparser.add_argument('--attack_model', '-AM', type=str, default='ANY', choices=['RandomForest', 'LogReg', 'LinearSVC', 'SVC', 'KNN', 'ANY'])\n argparser.add_argument('--runconfig', '-RC', default='runconfig_mia.json', type=str, help='Path relative to cwd of runconfig file')\n argparser.add_argument('--outdir', '-O', default='outputs/test', type=str, help='Path relative to cwd for storing output files')\n args = argparser.parse_args()\n\n # Load runconfig\n with open(path.join(cwd, args.runconfig)) as f:\n runconfig = json.load(f)\n print('Runconfig:')\n print(runconfig)\n\n # Load data\n RawDF, metadata = load_local_data_as_df(path.join(cwd, args.datapath))\n dname = args.datapath.split('/')[-1]\n RawDF['ID'] = [f'ID{i}' for i in arange(len(RawDF))]\n RawDF = RawDF.set_index('ID')\n\n print(f'Loaded data {dname}:')\n print(RawDF.info())\n\n # Randomly select nt target records T = (t_1, ..., t_(nt))\n targetIDs = choice(list(RawDF.index), size=runconfig['nTargets'], replace=False).tolist()\n Targets = RawDF.loc[targetIDs, :]\n\n # Drop targets from sample population\n RawDFdropT = RawDF.drop(targetIDs)\n\n # Add a crafted outlier target to the evaluation set\n targetCraft = craft_outlier(RawDF, runconfig['sizeTargetCraft'])\n targetIDs.extend(list(set(targetCraft.index)))\n Targets = Targets.append(targetCraft)\n\n # Sample adversary's background knowledge RawA\n rawAidx = choice(list(RawDFdropT.index), size=runconfig['sizeRawA'], replace=False).tolist()\n\n # Sample k independent target test sets\n rawTindices = [choice(list(RawDFdropT.index), size=runconfig['sizeRawT'], replace=False).tolist() for nr in range(runconfig['nIter'])]\n\n # List of candidate generative models to evaluate\n gmList = []\n for gm, paramsList in runconfig['generativeModels'].items():\n if gm == 'IndependentHistogram':\n for params in paramsList:\n gmList.append(IndependentHistogram(*params))\n elif gm == 'BayesianNet':\n for params in paramsList:\n gmList.append(BayesianNet(*params))\n elif gm == 'PrivBayes':\n for params in paramsList:\n gmList.append(PrivBayes(*params))\n elif gm == 'CTGAN':\n for params in paramsList:\n gmList.append(CTGAN(metadata, *params))\n elif gm == 'PateGan':\n for params in paramsList:\n gmList.append(PateGan(metadata, *params))\n else:\n raise ValueError(f'Unknown GM {gm}')\n\n for GenModel in gmList:\n print(f'----- {GenModel.__name__} -----')\n\n FeatureList = [NaiveFeatureSet(GenModel.datatype), HistogramFeatureSet(GenModel.datatype, metadata), CorrelationsFeatureSet(GenModel.datatype, metadata), EnsembleFeatureSet(GenModel.datatype, metadata)]\n\n prior = {LABEL_IN: runconfig['prior']['IN'], LABEL_OUT: runconfig['prior']['OUT']}\n\n if args.attack_model == 'RandomForest':\n AttacksList = [MIAttackClassifierRandomForest(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LogReg':\n AttacksList = [MIAttackClassifierLogReg(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LinearSVC':\n AttacksList = [MIAttackClassifierLinearSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'SVC':\n AttacksList = [MIAttackClassifierSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model 
== 'KNN':\n AttacksList = [MIAttackClassifierKNN(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'ANY':\n AttacksList = []\n for F in FeatureList:\n AttacksList.extend([MIAttackClassifierRandomForest(metadata, prior, F),\n MIAttackClassifierLogReg(metadata, prior, F),\n MIAttackClassifierKNN(metadata, prior, F)])\n else:\n raise ValueError(f'Unknown AM {args.attack_model}')\n\n # Run privacy evaluation under MIA adversary\n results = evaluate_mia(GenModel, AttacksList, RawDFdropT, Targets, targetIDs, rawAidx, rawTindices,\n runconfig['sizeRawT'], runconfig['sizeSynT'], runconfig['nSynT'],\n runconfig['nSynA'], runconfig['nShadows'], metadata)\n\n outfile = f\"{dname}{GenModel.__name__}MIA\"\n\n with open(path.join(f'{args.outdir}', f'{outfile}.json'), 'w') as f:\n json.dump(results, f, indent=2, default=json_numpy_serialzer)", "def main():\n if sys.argv[1] == 'train':\n build_training_data()\n main_trainer()\n\n if sys.argv[1] == 'predict' and len(sys.argv) > 2:\n from predict import classification\n from AdaboostPredict import decision_stumps\n input_file = open(sys.argv[2])\n data = input_file.readlines()\n print(\"Decision Tree prediction\")\n for i in data:\n print(classification(i, i.strip().split()))\n\n print(\"\\nAdaboost prediction\")\n\n for i in data:\n print(decision_stumps(i, i.strip().split()))\n\n elif sys.argv[1] == 'predict':\n print('Wrong usage for prediction. Please supply a file after predict')", "def main(args):\n env = gym.make(\"MountainCar-v0\")\n # Enabling layer_norm here is import for parameter space noise!\n model = deepq.models.mlp([64], layer_norm=True)\n act = deepq.learn(\n env,\n q_func=model,\n learning_rate=1e-3,\n max_timesteps=args.max_timesteps,\n buffer_size=50000,\n exploration_fraction=0.1,\n exploration_final_eps=0.1,\n print_freq=10,\n param_noise=True\n )\n print(\"Saving model to mountaincar_model.pkl\")\n act.save(\"mountaincar_model.pkl\")", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--file', '-f', type=str, help='path to corpus file', default='./train')\n args = parser.parse_args()\n\n corpus_reader = CorpusReader(args.file)\n model = BigramModel(corpus_reader.sents())\n\n test_sentences = ['Suggestive, Watson, is it not?',\n 'It is amazing that a family can be torn apart by something as simple as a pack of wild dogs!',\n 'So spoke Sherlock Holmes and turned back to the great scrapbook in which he was arranging and indexing some of his recent material.',\n 'What I like best about my friends is that they are few.',\n 'Friends what is like are they about I best few my that.']\n\n # prints two paragraphs with each five sentences\n for _ in range(2):\n print(generate(model, 5) + '\\n')\n\n # for each sentence in the test_sentences print the perplexity\n for sentence in test_sentences:\n print(model.perplexity(nltk.word_tokenize(sentence)))", "def init_argparser() -> ArgumentParser:\n parser = ArgumentParser()\n from_config = parser.add_argument_group('From config file', 'Provide full experiment setup via config file')\n from_config.add_argument('-c', '--config', help='Path to json file containing classification config.')\n from_cmd = parser.add_argument_group('From commandline', 'Specify experiment setup via commandline arguments')\n\n # Model options\n from_cmd.add_argument(\"--recoding_type\", type=str, default=None,\n choices=[\"mc_dropout\", \"surprisal\", \"ensemble\"],\n help=\"Recoding model type used for trainign. 
Choices include recoding based on MC Dropout,\"\n \"perplexity and anchored ensembles. If not specified, a vanilla model without recoding\"\n \"is used.\")\n from_cmd.add_argument(\"--step_type\", type=str, default=None, choices=[\"fixed\", \"mlp\", \"learned\"],\n help=\"Specifies the way the step size is determined when using a recoding model.\")\n from_cmd.add_argument(\"--step_size\", type=float,\n help=\"Step size for recoding in case the fixed step predictor is used.\")\n from_cmd.add_argument(\"--embedding_size\", type=int, help=\"Dimensionality of word embeddings.\")\n from_cmd.add_argument(\"--hidden_size\", type=int, help=\"Dimensionality of hidden states.\")\n from_cmd.add_argument(\"--num_layers\", type=int, help=\"Number of network layers.\")\n from_cmd.add_argument(\"--mc_dropout\", type=float, help=\"Dropout probability when estimating uncertainty.\")\n from_cmd.add_argument(\"--dropout\", type=float, help=\"Dropout probability for model in general.\")\n from_cmd.add_argument(\"--num_samples\", type=int, help=\"Number of samples used when estimating uncertainty.\")\n\n # Training options\n from_cmd.add_argument(\"--weight_decay\", type=float, help=\"Weight decay parameter when estimating uncertainty.\")\n from_cmd.add_argument(\"--prior_scale\", type=float,\n help=\"Prior length scale. A lower scale signifies a prior belief that the input data is \"\n \"distributed infrequently, a higher scale does the opposite.\")\n from_cmd.add_argument(\"--learning_rate\", type=float, help=\"Learning rate during training.\")\n from_cmd.add_argument(\"--batch_size\", type=int, help=\"Batch size during training.\")\n from_cmd.add_argument(\"--num_epochs\", type=int, help=\"Number of training epochs.\")\n from_cmd.add_argument(\"--clip\", type=float, help=\"Threshold for gradient clipping.\")\n\n # Corpus options\n from_cmd.add_argument(\"--corpus_dir\", type=str, help=\"Directory to corpus files.\")\n from_cmd.add_argument(\"--max_seq_len\", type=int, help=\"Maximum sentence length when reading in the corpora.\")\n\n # Screen output optins\n from_cmd.add_argument(\"--print_every\", type=int, help=\"Batch interval at which training info should be printed.\")\n from_cmd.add_argument(\"--eval_every\", type=int,\n help=\"Epoch interval at which the model should be evaluated on validation set.\")\n\n # Model saving and logging options\n from_cmd.add_argument(\"--model_name\", type=str, help=\"Model identifier.\")\n from_cmd.add_argument(\"--model_save_path\", type=str,\n help=\"Directory to which current best model should be saved to.\")\n from_cmd.add_argument(\"--device\", type=str, default=\"cpu\", help=\"Device used for training.\")\n from_cmd.add_argument(\"--log_dir\", type=str, help=\"Directory to write (tensorboard) logs to.\")\n\n return parser", "def novel_pred(novel_pred_file):\n print_section_title('Novel predict')\n\n # Get significantly conserved hits against known protein database\n if pre.exists_hit_file == False:\n print ' Blast searches against known database:', pre.blast_db\n alc_file = pre.project_name + '.alc'\n csqa_file = pre.project_name + '.csqa'\n pre.hit_file = csqa_file + '.blast'\n \n cmd = '%s -p -n %s -a -s %s' % (pre.get_data, pre.project_name, pre.seqs_file)\n call(cmd, shell = True, stdout = PIPE)\n cmd = '%s -n %s -s %s -c %s -q 1' % (pre.get_data, pre.project_name, pre.seqs_file, alc_file)\n call(cmd, shell = True, stdout = PIPE)\n cmd = '%s -i %s -d %s -m 8 -v 1 -b 1 -e 1 -o %s' % (pre.rpsblast, csqa_file, pre.blast_db, pre.hit_file)\n call(cmd, shell = True, 
stdout = PIPE)\n print cmd\n rm_files([alc_file, csqa_file])\n else:\n print ' Using given blast result file:', pre.hit_file\n\n # Get training sets for training SVM model based on significant hits\n is_unix = 0\n if is_win == 0:\n is_unix = 1\n print ' Seeds e-value cut:', pre.seeds_ev\n bh_file = pre.hit_file + '.bh'\n cmd = '%s -f %s -o %s -e %s' % (pre.parse_blast, pre.hit_file, bh_file, pre.seeds_ev)\n call(cmd, shell = True, stdout = PIPE)\n cmd = '%s -n %s -s %s -c %s -h -t %s -b %s -u' % \\\n (pre.get_data, pre.project_name, pre.seqs_file, bh_file, pre.tismodel, pre.bin_file)\n if is_unix == 0:\n cmd = '%s -n %s -s %s -c %s -h -t %s -b %s' % \\\n (pre.get_data, pre.project_name, pre.seqs_file, bh_file, pre.tismodel, pre.bin_file)\n call(cmd, shell=True, stdout=PIPE)\n\n # Get predicting data\n cmd = '%s -p -n %s -s %s -t %s -b %s -u %i' % (pre.get_data, pre.project_name, pre.seqs_file, pre.tismodel, pre.bin_file, is_unix)\n call(cmd, shell = True, stdout = PIPE)\n\n # Get binning groups\n fin_bin = open(pre.bin_file, 'r')\n bin_groups = []\n for line in fin_bin.readlines():\n group = line.split()[-1]\n if group not in bin_groups:\n bin_groups.append(group)\n\n threshold4training = 50\n print ' SVM subset size:', pre.svm_sub_size\n print ' Threshold of positive instance size for training:', threshold4training\n nn_1_file = novel_pred_file+'.1'\n fout_nn_1 = open(nn_1_file, 'w')\n for group in bin_groups:\n print '\\t', group \n enough4training = {'00' : False, '01' : False, '10' : False, '11' : False}\n orf_types = ['00', '01', '10', '11']\n # check if the positive instance is enough for training svm model\n for tt in orf_types:\n if trainingset_counter(pre.project_name + '.' + group + '.trainfea_' + tt)['+1'] > threshold4training:\n enough4training[tt] = True\n# print ' ', group, enough4training\n \n # train svm model based on genes with significant hits and use them for predicting\n this_nn_1_file = nn_1_file[0:nn_1_file.rfind('.')] + '.' + group + '.novel-pred'\n fout_this_novel = open(this_nn_1_file, 'w')\n fout_this_novel.close()\n \n for tt in orf_types:\n trainfea_file = pre.project_name + '.' + group + '.trainfea_' + tt\n fea_file = pre.project_name + '.' + group + '.fea_' + tt\n orf_file = pre.project_name + '.' 
+ group + '.orf_' + tt\n range_file = trainfea_file + '.range'\n model_file = trainfea_file + '.model' \n cds_pred_file = fea_file + '.cds'\n \n if enough4training[tt] == False:\n rm_files([trainfea_file, fea_file, orf_file])\n continue\n \n # train\n cmd = '%s -m 1000 -s %i -b %i -l 0 -u 1 %s' % (pre.train_cds_model_py, pre.svm_sub_size, pre.prob_status, trainfea_file)\n call(cmd, shell=True, stdout=PIPE)\n rm_files([trainfea_file, trainfea_file+'.scale.sub.out', trainfea_file+'.scale.sub.cross-acc'])\n\n # predict\n svm_predict(range_file, model_file, fea_file, orf_file, pre.svm_cut_value2, pre.prob_status, cds_pred_file)\n cmd = '%s -a %s -b %s --a+b -o %s' % (pre.metalocs_operate, cds_pred_file, this_nn_1_file, this_nn_1_file)\n call(cmd, shell = True, stdout = PIPE)\n rm_files([range_file, model_file, fea_file, orf_file, cds_pred_file])\n cmd = '%s -a %s -b %s --a+b -o %s' % (pre.metalocs_operate, this_nn_1_file, nn_1_file, nn_1_file)\n call(cmd, shell=True, stdout=PIPE)\n rm_files([this_nn_1_file])\n fout_nn_1.close()\n\n print ' Blast e-valule cut:', pre.blast_ev\n bl_file = pre.hit_file + '.bl'\n cmd = '%s -f %s -o %s -e %s' % (pre.parse_blast, pre.hit_file, bl_file, pre.blast_ev)\n call(cmd, shell=True, stdout=PIPE)\n\n cmd = '%s -a %s -b %s --a+b -o %s' % (pre.metalocs_operate, nn_1_file, bl_file, novel_pred_file)\n call(cmd, shell=True, stdout=PIPE)\n# print ' Final predictions store in file:', novel_pred_file\n rm_files([bl_file, bh_file, nn_1_file])\n return", "def main(**kwargs):\n data_file = kwargs.get('data_file', None)\n predict_unlabelled = kwargs.get('predict_unlabelled', False)\n output_preds = kwargs.get('output_preds', True)\n eval_results = kwargs.get('eval_results', True)\n\n # Prepare run_str\n run_str = datetime.now().strftime('%Y%m%d%H%M')\n\n initialise_print_logger('logs/prediction-' + run_str + '.log')\n\n print('Starting sharecast prediction:', run_str)\n\n # Load and divide data\n share_data = load_data(data_file)\n gc.collect()\n\n print('Number of \"NA\" symbols:',\n share_data[share_data['symbol'] == 'NA'].shape[0])\n\n # Divide data into symbols and general data for training an testing\n if predict_unlabelled:\n # Only return x values\n df_all_x, df_symbol_date = prepare_data_for_model(share_data, False)\n else:\n # Return x and y values\n df_all_x, df_all_y, df_all_actuals, df_symbol_date = prepare_data_for_model(\n share_data, True)\n\n del df_all_y\n\n del share_data\n gc.collect()\n\n print('Number of \"NA\" symbols:',\n df_symbol_date[df_symbol_date['symbol'] == 'NA'].shape[0])\n\n # Retain model names for train and test\n print('Retaining model name data. Number of rows:', len(df_all_x))\n model_names = df_all_x['model'].values\n gics_sectors = df_all_x['GICSSector'].values\n gics_industry_groups = df_all_x['GICSIndustryGroup'].values\n gics_industries = df_all_x['GICSIndustry'].values\n\n # Fix the names used in the GICS data - remove '&' ',' and ' '\n gics_sectors = fix_categorical(gics_sectors)\n gics_industry_groups = fix_categorical(gics_industry_groups)\n gics_industries = fix_categorical(gics_industries)\n\n # Drop model names and GICS values\n df_all_x = df_all_x.drop(\n ['model', 'GICSSector', 'GICSIndustryGroup', 'GICSIndustry'], axis=1)\n\n print('Loading pre-processing models')\n # Load pre-processing models\n symbol_encoder = load('models/se.pkl.gz')\n imputer = load('models/imputer.pkl.gz')\n scaler = load('models/scaler.pkl.gz')\n\n print('Executing pre-processing. 
Number of rows:', len(df_all_x))\n # Execute pre-processing\n df_all_x = execute_preprocessor(df_all_x, symbol_encoder, imputer, scaler)\n\n print('Loading keras models. Number of rows:', len(df_all_x))\n # Load keras models\n keras_models = {\n 'mape_model': load_model('models/keras-mape-model.h5', custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n }),\n 'mae_model': load_model('models/keras-mae-model.h5', custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n }),\n 'mae_intermediate_model': load_model('models/keras-mae-intermediate-model.h5',\n custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n }),\n }\n\n print('Loading xgboost model list')\n xgb_models = load_xgb_models()\n\n print('Loading xgboost industry model list')\n xgb_industry_models = load_xgb_models('industry')\n\n predictions = execute_model_predictions(\n df_all_x, model_names, gics_industry_groups, xgb_models, xgb_industry_models, keras_models)\n\n print('Loading bagging models')\n bagging_model = load_model('models/keras-bagging-model.h5', custom_objects={\n 'k_mean_absolute_percentage_error': k_mean_absolute_percentage_error,\n 'k_mae_mape': k_mae_mape,\n })\n bagging_scaler = load('models/deep-bagging-scaler.pkl.gz')\n deep_bagged_predictions = execute_deep_bagging(\n bagging_model, bagging_scaler, predictions)\n predictions['deep_bagged_predictions'] = deep_bagged_predictions\n\n if eval_results:\n assess_results(predictions, model_names, df_all_actuals, run_str)\n\n if output_preds:\n output_predictions(predictions, df_symbol_date, run_str)\n\n print('Prediction completed')", "def parse_arguments(args_to_parse):\n\tdefault_config = get_config_section([CONFIG_FILE], \"Custom\")\n\n\tdescription = \"PyTorch implementation and evaluation of disentangled Variational AutoEncoders and metrics.\"\n\tparser = argparse.ArgumentParser(description=description,\n\t\t\t\t\t\t\t\t\t formatter_class=FormatterNoDuplicate)\n\n\t# General options\n\tgeneral = parser.add_argument_group('General options')\n\tgeneral.add_argument('name', type=str,\n\t\t\t\t\t\t help=\"Name of the model for storing and loading purposes.\")\n\tgeneral.add_argument('-L', '--log-level', help=\"Logging levels.\",\n\t\t\t\t\t\t default=default_config['log_level'], choices=LOG_LEVELS)\n\tgeneral.add_argument('--no-progress-bar', action='store_true',\n\t\t\t\t\t\t default=default_config['no_progress_bar'],\n\t\t\t\t\t\t help='Disables progress bar.')\n\tgeneral.add_argument('--no-cuda', action='store_true',\n\t\t\t\t\t\t default=default_config['no_cuda'],\n\t\t\t\t\t\t help='Disables CUDA training, even when have one.')\n\tgeneral.add_argument('-s', '--seed', type=int, default=default_config['seed'],\n\t\t\t\t\t\t help='Random seed. 
Can be `None` for stochastic behavior.')\n\tgeneral.add_argument('-c', '--classifier', type = str,\n\t\t\t\t\t\t help='Name of the classifier')\n\t# Learning options\n\ttraining = parser.add_argument_group('Training specific options')\n\ttraining.add_argument('--checkpoint-every',\n\t\t\t\t\t\t type=int, default=default_config['checkpoint_every'],\n\t\t\t\t\t\t help='Save a checkpoint of the trained model every n epoch.')\n\ttraining.add_argument('-d', '--dataset', help=\"Path to training data.\",\n\t\t\t\t\t\t default=default_config['dataset'], choices=DATASETS)\n\ttraining.add_argument('-x', '--experiment',\n\t\t\t\t\t\t default=default_config['experiment'], choices=EXPERIMENTS,\n\t\t\t\t\t\t help='Predefined experiments to run. If not `custom` this will overwrite some other arguments.')\n\ttraining.add_argument('-e', '--epochs', type=int,\n\t\t\t\t\t\t default=default_config['epochs'],\n\t\t\t\t\t\t help='Maximum number of epochs to run for.')\n\ttraining.add_argument('-b', '--batch-size', type=int,\n\t\t\t\t\t\t default=default_config['batch_size'],\n\t\t\t\t\t\t help='Batch size for training.')\n\ttraining.add_argument('--lr', type=float, default=default_config['lr'],\n\t\t\t\t\t\t help='Learning rate.')\n\ttraining.add_argument('-g', '--gamma', type = float, default = 1., help = 'gamma for var')\n\n\t# Model Options\n\tmodel = parser.add_argument_group('Model specific options')\n\tmodel.add_argument('-m', '--model-type',\n\t\t\t\t\t default=default_config['model'], choices=MODELS,\n\t\t\t\t\t help='Type of encoder and decoder to use.')\n\tmodel.add_argument('-z', '--latent-dim', type=int,\n\t\t\t\t\t default=default_config['latent_dim'],\n\t\t\t\t\t help='Dimension of the latent variable.')\n\tmodel.add_argument('-l', '--loss',\n\t\t\t\t\t default=default_config['loss'], choices=LOSSES,\n\t\t\t\t\t help=\"Type of VAE loss function to use.\")\n\tmodel.add_argument('-r', '--rec-dist', default=default_config['rec_dist'],\n\t\t\t\t\t choices=RECON_DIST,\n\t\t\t\t\t help=\"Form of the likelihood to use for each pixel.\")\n\tmodel.add_argument('-a', '--reg-anneal', type=float,\n\t\t\t\t\t default=default_config['reg_anneal'],\n\t\t\t\t\t help=\"Number of annealing steps over which the regularisation is gradually added. 
What is annealed is specific to each loss.\")\n\n\t# Loss Specific Options\n\tbetaH = parser.add_argument_group('BetaH specific parameters')\n\tbetaH.add_argument('--betaH-B', type=float,\n\t\t\t\t\t default=default_config['betaH_B'],\n\t\t\t\t\t help=\"Weight of the KL (beta in the paper).\")\n\n\tbetaB = parser.add_argument_group('BetaB specific parameters')\n\tbetaB.add_argument('--betaB-initC', type=float,\n\t\t\t\t\t default=default_config['betaB_initC'],\n\t\t\t\t\t help=\"Starting annealed capacity.\")\n\tbetaB.add_argument('--betaB-finC', type=float,\n\t\t\t\t\t default=default_config['betaB_finC'],\n\t\t\t\t\t help=\"Final annealed capacity.\")\n\tbetaB.add_argument('--betaB-G', type=float,\n\t\t\t\t\t default=default_config['betaB_G'],\n\t\t\t\t\t help=\"Weight of the KL divergence term (gamma in the paper).\")\n\n\tfactor = parser.add_argument_group('factor VAE specific parameters')\n\tfactor.add_argument('--factor-G', type=float,\n\t\t\t\t\t\tdefault=default_config['factor_G'],\n\t\t\t\t\t\thelp=\"Weight of the TC term (gamma in the paper).\")\n\tfactor.add_argument('--lr-disc', type=float,\n\t\t\t\t\t\tdefault=default_config['lr_disc'],\n\t\t\t\t\t\thelp='Learning rate of the discriminator.')\n\n\tbtcvae = parser.add_argument_group('beta-tcvae specific parameters')\n\tbtcvae.add_argument('--btcvae-A', type=float,\n\t\t\t\t\t\tdefault=default_config['btcvae_A'],\n\t\t\t\t\t\thelp=\"Weight of the MI term (alpha in the paper).\")\n\tbtcvae.add_argument('--btcvae-G', type=float,\n\t\t\t\t\t\tdefault=default_config['btcvae_G'],\n\t\t\t\t\t\thelp=\"Weight of the dim-wise KL term (gamma in the paper).\")\n\tbtcvae.add_argument('--btcvae-B', type=float,\n\t\t\t\t\t\tdefault=default_config['btcvae_B'],\n\t\t\t\t\t\thelp=\"Weight of the TC term (beta in the paper).\")\n\n\t# Evaluation options\n\tevaluation = parser.add_argument_group('Evaluation specific options')\n\tevaluation.add_argument('--is-eval-only', action='store_true',\n\t\t\t\t\t\t\tdefault=default_config['is_eval_only'],\n\t\t\t\t\t\t\thelp='Whether to only evaluate using precomputed model `name`.')\n\tevaluation.add_argument('--is-metrics', action='store_true',\n\t\t\t\t\t\t\tdefault=default_config['is_metrics'],\n\t\t\t\t\t\t\thelp=\"Whether to compute the disentangled metrics. 
Currently only possible with `dsprites` as it is the only dataset with known true factors of variations.\")\n\tevaluation.add_argument('--no-test', action='store_true',\n\t\t\t\t\t\t\tdefault=default_config['no_test'],\n\t\t\t\t\t\t\thelp=\"Whether not to compute the test losses.`\")\n\tevaluation.add_argument('--eval-batchsize', type=int,\n\t\t\t\t\t\t\tdefault=default_config['eval_batchsize'],\n\t\t\t\t\t\t\thelp='Batch size for evaluation.')\n\n\targs = parser.parse_args(args_to_parse)\n\tif args.experiment != 'custom':\n\t\tif args.experiment not in ADDITIONAL_EXP:\n\t\t\t# update all common sections first\n\t\t\tmodel, dataset = args.experiment.split(\"_\")\n\t\t\tcommon_data = get_config_section([CONFIG_FILE], \"Common_{}\".format(dataset))\n\t\t\tupdate_namespace_(args, common_data)\n\t\t\tcommon_model = get_config_section([CONFIG_FILE], \"Common_{}\".format(model))\n\t\t\tupdate_namespace_(args, common_model)\n\n\t\ttry:\n\t\t\texperiments_config = get_config_section([CONFIG_FILE], args.experiment)\n\t\t\tupdate_namespace_(args, experiments_config)\n\t\texcept KeyError as e:\n\t\t\tif args.experiment in ADDITIONAL_EXP:\n\t\t\t\traise e # only reraise if didn't use common section\n\n\treturn args", "def parse_opts():\n MODELS = core.list_models()\n flags = [arg for arg in sys.argv[1:]\n if arg.startswith('-')]\n values = [arg for arg in sys.argv[1:]\n if not arg.startswith('-') and '=' in arg]\n args = [arg for arg in sys.argv[1:]\n if not arg.startswith('-') and '=' not in arg]\n models = \"\\n \".join(\"%-15s\"%v for v in MODELS)\n if len(args) == 0:\n print(USAGE)\n print(\"\\nAvailable models:\")\n print(columnize(MODELS, indent=\" \"))\n sys.exit(1)\n if len(args) > 3:\n print(\"expected parameters: model N1 N2\")\n\n name = args[0]\n try:\n model_info = core.load_model_info(name)\n except ImportError as exc:\n print(str(exc))\n print(\"Could not find model; use one of:\\n \" + models)\n sys.exit(1)\n\n invalid = [o[1:] for o in flags\n if o[1:] not in NAME_OPTIONS\n and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)]\n if invalid:\n print(\"Invalid options: %s\"%(\", \".join(invalid)))\n sys.exit(1)\n\n\n # pylint: disable=bad-whitespace\n # Interpret the flags\n opts = {\n 'plot' : True,\n 'view' : 'log',\n 'is2d' : False,\n 'qmax' : 0.05,\n 'nq' : 128,\n 'res' : 0.0,\n 'accuracy' : 'Low',\n 'cutoff' : 0.0,\n 'seed' : -1, # default to preset\n 'mono' : False,\n 'show_pars' : False,\n 'show_hist' : False,\n 'rel_err' : True,\n 'explore' : False,\n 'use_demo' : True,\n 'zero' : False,\n }\n engines = []\n for arg in flags:\n if arg == '-noplot': opts['plot'] = False\n elif arg == '-plot': opts['plot'] = True\n elif arg == '-linear': opts['view'] = 'linear'\n elif arg == '-log': opts['view'] = 'log'\n elif arg == '-q4': opts['view'] = 'q4'\n elif arg == '-1d': opts['is2d'] = False\n elif arg == '-2d': opts['is2d'] = True\n elif arg == '-exq': opts['qmax'] = 10.0\n elif arg == '-highq': opts['qmax'] = 1.0\n elif arg == '-midq': opts['qmax'] = 0.2\n elif arg == '-lowq': opts['qmax'] = 0.05\n elif arg == '-zero': opts['zero'] = True\n elif arg.startswith('-nq='): opts['nq'] = int(arg[4:])\n elif arg.startswith('-res='): opts['res'] = float(arg[5:])\n elif arg.startswith('-accuracy='): opts['accuracy'] = arg[10:]\n elif arg.startswith('-cutoff='): opts['cutoff'] = float(arg[8:])\n elif arg.startswith('-random='): opts['seed'] = int(arg[8:])\n elif arg == '-random': opts['seed'] = np.random.randint(1e6)\n elif arg == '-preset': opts['seed'] = -1\n elif arg == '-mono': 
opts['mono'] = True\n elif arg == '-poly': opts['mono'] = False\n elif arg == '-pars': opts['show_pars'] = True\n elif arg == '-nopars': opts['show_pars'] = False\n elif arg == '-hist': opts['show_hist'] = True\n elif arg == '-nohist': opts['show_hist'] = False\n elif arg == '-rel': opts['rel_err'] = True\n elif arg == '-abs': opts['rel_err'] = False\n elif arg == '-half': engines.append(arg[1:])\n elif arg == '-fast': engines.append(arg[1:])\n elif arg == '-single': engines.append(arg[1:])\n elif arg == '-double': engines.append(arg[1:])\n elif arg == '-single!': engines.append(arg[1:])\n elif arg == '-double!': engines.append(arg[1:])\n elif arg == '-quad!': engines.append(arg[1:])\n elif arg == '-sasview': engines.append(arg[1:])\n elif arg == '-edit': opts['explore'] = True\n elif arg == '-demo': opts['use_demo'] = True\n elif arg == '-default': opts['use_demo'] = False\n # pylint: enable=bad-whitespace\n\n if len(engines) == 0:\n engines.extend(['single', 'sasview'])\n elif len(engines) == 1:\n if engines[0][0] != 'sasview':\n engines.append('sasview')\n else:\n engines.append('single')\n elif len(engines) > 2:\n del engines[2:]\n\n n1 = int(args[1]) if len(args) > 1 else 1\n n2 = int(args[2]) if len(args) > 2 else 1\n use_sasview = any(engine=='sasview' and count>0\n for engine, count in zip(engines, [n1, n2]))\n\n # Get demo parameters from model definition, or use default parameters\n # if model does not define demo parameters\n pars = get_pars(model_info, opts['use_demo'])\n\n\n # Fill in parameters given on the command line\n presets = {}\n for arg in values:\n k, v = arg.split('=', 1)\n if k not in pars:\n # extract base name without polydispersity info\n s = set(p.split('_pd')[0] for p in pars)\n print(\"%r invalid; parameters are: %s\"%(k, \", \".join(sorted(s))))\n sys.exit(1)\n presets[k] = float(v) if not k.endswith('type') else v\n\n # randomize parameters\n #pars.update(set_pars) # set value before random to control range\n if opts['seed'] > -1:\n pars = randomize_pars(pars, seed=opts['seed'])\n print(\"Randomize using -random=%i\"%opts['seed'])\n if opts['mono']:\n pars = suppress_pd(pars)\n pars.update(presets) # set value after random to control value\n #import pprint; pprint.pprint(model_info)\n constrain_pars(model_info, pars)\n if use_sasview:\n constrain_new_to_old(model_info, pars)\n if opts['show_pars']:\n print(str(parlist(model_info, pars, opts['is2d'])))\n\n # Create the computational engines\n data, _ = make_data(opts)\n if n1:\n base = make_engine(model_info, data, engines[0], opts['cutoff'])\n else:\n base = None\n if n2:\n comp = make_engine(model_info, data, engines[1], opts['cutoff'])\n else:\n comp = None\n\n # pylint: disable=bad-whitespace\n # Remember it all\n opts.update({\n 'name' : name,\n 'def' : model_info,\n 'n1' : n1,\n 'n2' : n2,\n 'presets' : presets,\n 'pars' : pars,\n 'data' : data,\n 'engines' : [base, comp],\n })\n # pylint: enable=bad-whitespace\n\n return opts", "def __init__(\n self,\n hparams: argparse.Namespace,\n num_labels=None,\n mode=\"base\",\n config=None,\n tokenizer=None,\n model=None,\n **config_kwargs\n ):\n super().__init__()\n # TODO: move to self.save_hyperparameters()\n # self.save_hyperparameters()\n # can also expand arguments into trainer signature for easier reading\n\n self.save_hyperparameters(hparams)\n self.step_count = 0\n self.output_dir = Path(self.hparams.output_dir)\n cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None\n if config is None:\n self.config = AutoConfig.from_pretrained(\n 
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,\n **({\"num_labels\": num_labels} if num_labels is not None else {}),\n cache_dir=cache_dir,\n **config_kwargs,\n )\n else:\n self.config: BartConfig = config\n\n extra_model_params = (\"encoder_layerdrop\", \"decoder_layerdrop\", \"dropout\", \"attention_dropout\")\n for p in extra_model_params:\n if getattr(self.hparams, p, None):\n assert hasattr(self.config, p), f\"model config doesn't have a `{p}` attribute\"\n setattr(self.config, p, getattr(self.hparams, p))\n\n if tokenizer is None:\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,\n cache_dir=cache_dir,\n )\n else:\n self.tokenizer: BartTokenizer = tokenizer\n # self.model_type = MODEL_MODES[mode]\n if model is None:\n self.model = self.model_type.from_pretrained(\n self.hparams.model_name_or_path,\n from_tf=bool(\".ckpt\" in self.hparams.model_name_or_path),\n config=self.config,\n cache_dir=cache_dir,\n )\n else:\n self.model = model", "def main(\n network_type: NetworkType = Argument(..., help=\"type of the VAE network\"),\n bottleneck_dim: int = Option(\n 16, \"--bottleneck_dim\", \"-n\", help=\"size of the VAE bottleneck\"\n ),\n lr: float = Option(0.001, \"--lr\", \"-r\", help=\"learning rate for training\"),\n batch_size: int = Option(..., \"--batch_size\", \"-b\", help=\"batch size for training\"),\n epochs: int = Option(..., \"--epochs\", \"-e\", help=\"epochs to train\"),\n device: str = Option(\n \"cpu\", \"--device\", \"-d\", help='device to train on, e.g. \"cuda:0\"'\n ),\n logdir: str = Option(\n \"./results\",\n \"--logdir\",\n \"-l\",\n help=\"directory to log the models and event file to\",\n ),\n):\n\n mnist_data = dataset.MyMNIST()\n\n if network_type == NetworkType.mlp:\n net = model.MLPVAE((1, 32, 32), bottleneck_dim)\n else:\n net = model.CNNVAE((1, 32, 32), bottleneck_dim)\n\n optim = torch.optim.Adam(net.parameters(), lr)\n vae_trainer = trainer.Trainer(net, mnist_data, optim, batch_size, device, logdir)\n vae_trainer.train(epochs)", "def main():\n \n\n parser = argparse.ArgumentParser(description='MozartFlow: Observing the flow of music.')\n\n parser.add_argument('-k', '--knn', help='K in K-nearest neighbours algorithm', default=2)\n parser.add_argument('-ll', '--loglevel', help='Set the logging level', type=str, choices=['DEBUG','INFO','WARNING','ERROR','CRITICAL'])\n parser.add_argument('-p', '--path', help='Filepath of the audio file, need to be labeled', type=str, default='')\n \n args = parser.parse_args()\n \n logging.basicConfig(level=args.loglevel)\n\n model = Model(args.knn, args.loglevel)\n model.model()\n\n if args.path is not '':\n model.prediction(args.path)\n else:\n print('\\n[-.-] Ain\\'t you testing something! Well, that\\'s a shame. 
I learned just for you.')\n\n logger.info('\\n\\n-------/------- Created by ------/-------')\n for creator in model.read_yml['_creator']:\n logger.info('Lord {}'.format(creator))", "def read_cmd(self):\n\n parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n req_opts = parser.add_argument_group(\"Required Options\")\n req_opts.add_argument(\"--instance_dir\", required=True,\n help=\"directory with instances (not recursive\")\n \n opt_opts = parser.add_argument_group(\"Optional Options\")\n \n opt_opts.add_argument(\"--fn_suffix\", default=\".*\",\n help=\"suffix of instance file names\")\n opt_opts.add_argument(\"--cutoff\", default=10, type=int,\n help=\"running time cutoff [sec]\")\n opt_opts.add_argument(\"--memlimit\", default=2048, type=int,\n help=\"memory limit\")\n opt_opts.add_argument(\"--ac_budget\", default=360,\n help=\"configuration budget [sec]\")\n opt_opts.add_argument(\"--run_obj\", default=\"runtime\",\n choices=[\"runtime\", \"quality\"],\n help=\"run objective\")\n opt_opts.add_argument(\"--par-factor\", default=10,\n help=\"Factor by which to penalize unsolved instances. Usage may differ based on TAE used.\")\n\n opt_opts.add_argument(\"--binary\", default=\"clingo\",\n help=\"target binary\")\n opt_opts.add_argument(\"--pcs_file\", default=\"pcs/all_params.pcs\",\n help=\"parameter configuration file\")\n opt_opts.add_argument(\"--runsolver\", default=\"binaries/runsolver\",\n help=\"runsolver binary\")\n opt_opts.add_argument(\"--tae_class\", default=None,\n help=\"TAE class to individualize clingo calls -- has to inherit from smac.tae.execute_ta_run_aclib.ExecuteTARunAClib\")\n\n\n opt_opts.add_argument(\"--seed\", default=12345, type=int,\n help=\"random seed\")\n opt_opts.add_argument(\"--verbose_level\", default=logging.INFO,\n choices=[\"INFO\", \"DEBUG\"],\n help=\"random seed\")\n opt_opts.add_argument(\"--tae_args\", default=\"{}\",\n help=\"Miscellaneous options for the TAE\")\n \n\n args_, misc = parser.parse_known_args()\n self._check_args(args_)\n args_.tae_args=json.loads(args_.tae_args)\n\n # remove leading '-' in option names\n misc = dict((k.lstrip(\"-\"), v.strip(\"'\"))\n for k, v in zip(misc[::2], misc[1::2]))\n\n misc[\"instances\"] = self._find_files(dir_=args_.instance_dir, suffix_=args_.fn_suffix)\n misc[\"wallclock_limit\"] = args_.ac_budget\n misc[\"cutoff_time\"] = args_.cutoff\n misc[\"paramfile\"] = args_.pcs_file\n misc[\"algo\"] = \"\"\n misc[\"run_obj\"] = args_.run_obj\n\n return args_, misc", "def parse_cmd_line_args():\n\n parser = argparse.ArgumentParser(\n description='Run FEM-BV-VARX on system with periodic time-dependence')\n\n parser.add_argument('--n-components', dest='n_components', type=int,\n default=2, help='number of FEM-BV-VARX components')\n parser.add_argument('--n-init', dest='n_init', type=int, default=10,\n help='number of initializations to try')\n parser.add_argument('--max-tv-norm', dest='max_tv_norm', type=float,\n default=None, help='maximum TV norm bound')\n parser.add_argument('--memory', dest='memory', type=int, default=1,\n help='maximum memory in FEM-BV-VARX models')\n parser.add_argument('--tolerance', dest='tolerance', type=float,\n default=1e-4, help='convergence tolerance')\n parser.add_argument('--max-iterations', dest='max_iterations', type=int,\n default=500, help='maximum number of iterations')\n parser.add_argument('--n-features', dest='n_features', type=int, default=2,\n help='dimensionality of data')\n parser.add_argument('--n-samples', dest='n_samples', type=int, 
default=500,\n help='length of time-series')\n parser.add_argument('--period', dest='period', type=float, default=10,\n help='period of time-dependent parameters')\n parser.add_argument('--order', dest='order', type=int, default=1,\n help='order of AR process to generate data with')\n parser.add_argument('--noise-variance', dest='noise_variance', type=float,\n default=1.0, help='magnitude of noise variance')\n parser.add_argument('--random-seed', dest='random_seed', type=int, default=0,\n help='random seed')\n parser.add_argument('--plot-data', dest='plot_data', action='store_true',\n help='plot time-series of initial data')\n parser.add_argument('--plot-weights', dest='plot_weights', action='store_true',\n help='plot FEM-BV-VARX weights')\n parser.add_argument('--verbose', dest='verbose', action='store_true',\n help='produce verbose output')\n\n args = parser.parse_args()\n\n if args.n_components < 1:\n raise ValueError('Number of components must be at least 1')\n\n if args.n_init < 1:\n raise ValueError('Number of initializations must be at least 1')\n\n if args.max_tv_norm is not None and args.max_tv_norm < 0:\n raise ValueError('Maximum TV norm bound must be non-negative')\n\n if args.memory < 0:\n raise ValueError('FEM-BV-VARX memory must be non-negative')\n\n if args.tolerance <= 0:\n raise ValueError('Convergence tolerance must be positive')\n\n if args.max_iterations < 1:\n raise ValueError('Maximum number of iterations must be at least 1')\n\n if args.n_features < 1:\n raise ValueError('Number of data features must be at least 1')\n\n if args.n_samples < 1:\n raise ValueError('Number of samples must be at least 1')\n\n if args.period <= 0:\n raise ValueError('Period must be positive')\n\n if args.order < 0:\n raise ValueError('Order of generating process must be non-negative')\n\n if args.noise_variance <= 0:\n raise ValueError('Noise variance must be positive')\n\n return args", "def main():\n\n parser = argparse.ArgumentParser(description='Duolingo shared task baseline model')\n parser.add_argument('--train', help='Training file name', required=True)\n parser.add_argument('--test', help='Test file name, to make predictions on', required=True)\n parser.add_argument('--pred', help='Output file name for predictions, defaults to test_name.pred')\n args = parser.parse_args()\n\n if not args.pred:\n args.pred = args.test + '.pred'\n\n assert os.path.isfile(args.train)\n assert os.path.isfile(args.test)\n\n # Assert that the train course matches the test course\n assert os.path.basename(args.train)[:5] == os.path.basename(args.test)[:5]\n\n training_data, training_labels = load_data(args.train)\n test_data = load_data(args.test)\n\n ####################################################################################\n # Here is the delineation between loading the data and running the baseline model. #\n # Replace the code between this and the next comment block with your own. 
#\n ####################################################################################\n\n\n vectorizer = DictVectorizer()\n X_train = [instance_data.to_features() for instance_data in training_data]\n Y_train = [training_labels[instance_data.instance_id] for instance_data in training_data]\n ids_train = [instance_data.instance_id for instance_data in training_data]\n\n X_test = [instance_data.to_features() for instance_data in test_data]\n ids_test = [instance_data.instance_id for instance_data in test_data]\n\n\n X_train = vectorizer.fit_transform(X_train)\n clf = LogisticRegression()\n clf.fit(X_train,Y_train)\n\n X_test = vectorizer.transform(X_test)\n preds_scores = [x[0] for x in clf.predict_proba(X_test)]\n predictions = dict([(instance_id,pred_score) for instance_id,pred_score in zip(ids_test,preds_scores)])\n\n ####################################################################################\n # This ends the baseline model code; now we just write predictions. #\n ####################################################################################\n\n with open(args.pred, 'wt') as f:\n for instance_id, prediction in iteritems(predictions):\n f.write(instance_id + ' ' + str(prediction) + '\\n')", "def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)", "def run(self, verbose=0):\n self.verbose = verbose\n self._preproc()\n self._lda()\n self._evaluate()", "def main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model-artm\", required=True,\n help=\"Path to directory with BigARTM model\")\n args = parser.parse_args()\n model_artm = 
artm.load_artm_model(args.model_artm)\n\n with open('topic_issue_model.pickle', 'rb') as issue_pickle_file:\n topic_issue_model: TopicIssueModel = pickle.load(issue_pickle_file)\n predict_topics(topic_issue_model, model_artm)", "def run_vorpastat(self, *options):\n\n args = list(options) + [self._input_file, 'out.meshb']\n self._run_command(\"vorpastat\", args)", "def main(opt):\n ##################################################################################################################\n # Setup\n ##################################################################################################################\n # Device handling (CPU, GPU, multi GPU)\n if opt.device is None:\n device = torch.device('cpu')\n opt.n_gpu = 0\n else:\n opt.n_gpu = len(opt.device)\n os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.device[opt.local_rank])\n device = torch.device('cuda:0')\n torch.cuda.set_device(0)\n # In the case of multi GPU: sets up distributed training\n if opt.n_gpu > 1 or opt.local_rank > 0:\n torch.distributed.init_process_group(backend='nccl')\n # Since we are in distributed mode, divide batch size by the number of GPUs\n assert opt.batch_size % opt.n_gpu == 0\n opt.batch_size = opt.batch_size // opt.n_gpu\n # Seed\n if opt.seed is None:\n opt.seed = random.randint(1, 10000)\n else:\n assert isinstance(opt.seed, int) and opt.seed > 0\n print(f'Learning on {opt.n_gpu} GPU(s) (seed: {opt.seed})')\n random.seed(opt.seed)\n np.random.seed(opt.seed + opt.local_rank)\n torch.manual_seed(opt.seed)\n # cuDNN\n if opt.n_gpu > 1 or opt.local_rank > 0:\n assert torch.backends.cudnn.enabled\n cudnn.deterministic = True\n # Mixed-precision training\n if opt.torch_amp and not torch_amp_imported:\n raise ImportError('Mixed-precision not supported by this PyTorch version, upgrade PyTorch or use Apex')\n if opt.apex_amp and not apex_amp_imported:\n raise ImportError('Apex not installed (https://github.com/NVIDIA/apex)')\n\n ##################################################################################################################\n # Data\n ##################################################################################################################\n print('Loading data...')\n # Load data\n dataset = data.load_dataset(opt, True)\n trainset = dataset.get_fold('train')\n valset = dataset.get_fold('val')\n # Change validation sequence length, if specified\n if opt.seq_len_test is not None:\n valset.change_seq_len(opt.seq_len_test)\n\n # Handle random seed for dataloader workers\n def worker_init_fn(worker_id):\n np.random.seed((opt.seed + itr + opt.local_rank * opt.n_workers + worker_id) % (2**32 - 1))\n # Dataloader\n sampler = None\n shuffle = True\n if opt.n_gpu > 1:\n # Let the distributed sampler shuffle for the distributed case\n sampler = torch.utils.data.distributed.DistributedSampler(trainset)\n shuffle = False\n train_loader = DataLoader(trainset, batch_size=opt.batch_size, collate_fn=data.collate_fn, sampler=sampler,\n num_workers=opt.n_workers, shuffle=shuffle, drop_last=True, pin_memory=True,\n worker_init_fn=worker_init_fn)\n val_loader = DataLoader(valset, batch_size=opt.batch_size_test, collate_fn=data.collate_fn,\n num_workers=opt.n_workers, shuffle=True, drop_last=True, pin_memory=True,\n worker_init_fn=worker_init_fn) if opt.local_rank == 0 else None\n\n ##################################################################################################################\n # Model\n 
##################################################################################################################\n # Buid model\n print('Building model...')\n model = srvp.StochasticLatentResidualVideoPredictor(opt.nx, opt.nc, opt.nf, opt.nhx, opt.ny, opt.nz, opt.skipco,\n opt.nt_inf, opt.nh_inf, opt.nlayers_inf, opt.nh_res,\n opt.nlayers_res, opt.archi)\n model.init(res_gain=opt.res_gain)\n # Make the batch norms in the model synchronized in the distributed case\n if opt.n_gpu > 1:\n if opt.apex_amp:\n from apex.parallel import convert_syncbn_model\n model = convert_syncbn_model(model)\n else:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model.to(device)\n\n ##################################################################################################################\n # Optimizer\n ##################################################################################################################\n optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)\n opt.n_iter = opt.lr_scheduling_burnin + opt.lr_scheduling_n_iter\n lr_sch_n_iter = opt.lr_scheduling_n_iter\n lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,\n lr_lambda=lambda i: max(0, (lr_sch_n_iter - i) / lr_sch_n_iter))\n\n ##################################################################################################################\n # Automatic Mixed Precision\n ##################################################################################################################\n scaler = None\n if opt.torch_amp:\n scaler = torch_amp.GradScaler()\n if opt.apex_amp:\n model, optimizer = apex_amp.initialize(model, optimizer, opt_level=opt.amp_opt_lvl,\n keep_batchnorm_fp32=opt.keep_batchnorm_fp32,\n verbosity=opt.apex_verbose)\n\n ##################################################################################################################\n # Multi GPU\n ##################################################################################################################\n if opt.n_gpu > 1:\n if opt.apex_amp:\n from apex.parallel import DistributedDataParallel\n forward_fn = DistributedDataParallel(model)\n else:\n forward_fn = torch.nn.parallel.DistributedDataParallel(model)\n else:\n forward_fn = model\n\n ##################################################################################################################\n # Training\n ##################################################################################################################\n cudnn.benchmark = True # Activate benchmarks to select the fastest algorithms\n assert opt.n_iter > 0\n itr = 0\n finished = False\n # Progress bar\n if opt.local_rank == 0:\n pb = tqdm(total=opt.n_iter, ncols=0)\n # Current and best model evaluation metric (lower is better)\n val_metric = None\n best_val_metric = None\n try:\n while not finished:\n if sampler is not None:\n sampler.set_epoch(opt.seed + itr)\n # -------- TRAIN --------\n for batch in train_loader:\n # Stop when the given number of optimization steps have been done\n if itr >= opt.n_iter:\n finished = True\n status_code = 0\n break\n\n itr += 1\n model.train()\n # Optimization step on batch\n # Allow PyTorch's mixed-precision computations if required while ensuring retrocompatibilty\n with (torch_amp.autocast() if opt.torch_amp else nullcontext()):\n loss, nll, kl_y_0, kl_z = train(forward_fn, optimizer, scaler, batch, device, opt)\n\n # Learning rate scheduling\n if itr >= opt.lr_scheduling_burnin:\n lr_scheduler.step()\n\n # Evaluation and model saving are performed on the 
process with local rank zero\n if opt.local_rank == 0:\n # Evaluation\n if itr % opt.val_interval == 0:\n model.eval()\n val_metric = evaluate(forward_fn, val_loader, device, opt)\n if best_val_metric is None or best_val_metric > val_metric:\n best_val_metric = val_metric\n torch.save(model.state_dict(), os.path.join(opt.save_path, 'model_best.pt'))\n\n # Checkpointing\n if opt.chkpt_interval is not None and itr % opt.chkpt_interval == 0:\n torch.save(model.state_dict(), os.path.join(opt.save_path, f'model_{itr}.pt'))\n\n # Progress bar\n if opt.local_rank == 0:\n pb.set_postfix({'loss': loss, 'nll': nll, 'kl_y_0': kl_y_0, 'kl_z': kl_z, 'val_metric': val_metric,\n 'best_val_metric': best_val_metric}, refresh=False)\n pb.update()\n\n except KeyboardInterrupt:\n status_code = 130\n\n if opt.local_rank == 0:\n pb.close()\n # Save model\n print('Saving...')\n if opt.local_rank == 0:\n torch.save(model.state_dict(), os.path.join(opt.save_path, 'model.pt'))\n print('Done')\n return status_code", "def main():\n parser = argparse.ArgumentParser(\n usage = '%(prog)s [OPTIONS] [ARGS...]',\n description='Calculate something',\n epilog='Contact simon.clematide@uzh.ch'\n )\n parser.add_argument('--version', action='version', version='0.99')\n parser.add_argument('-l', '--logfile', dest='logfile',\n help='write log to FILE', metavar='FILE')\n parser.add_argument('-q', '--quiet',\n action='store_true', dest='quiet', default=False,\n help='do not print status messages to stderr')\n parser.add_argument('-d', '--debug',\n action='store_true', dest='debug', default=False,\n help='print debug information')\n parser.add_argument('-c', '--corpus_dir',\n action='store', dest='corpus_dir', default='corpus',\n help='directory with corpus data %(default)')\n parser.add_argument('-m', '--model_dir',\n action='store', dest='model_dir', default='model',\n help='directory with model data %(default)')\n parser.add_argument('-B', '--is_backward_lm',\n action='store_true', dest='is_backward_lm', default=False,\n help='build backward model')\n parser.add_argument('args', nargs='*')\n options = parser.parse_args()\n if options.logfile:\n logging.basicConfig(filename=logfile)\n if options.debug:\n logging.basicConfig(level=logging.DEBUG)\n\n process(options)", "def handle_arguments():\n # process the command options\n parser = argparse.ArgumentParser()\n parser.add_argument('images', type=str, help='provide path in style: '\n r'\"kaggle\\input\\bengaliai-cv19\\images.npy\"')\n parser.add_argument('labels', type=str, help='provide path in style: '\n r'\"kaggle\\input\\bengaliai-cv19\\labels.csv\"')\n parser.add_argument('-t', '--test_ratio', type=float, default=0.2,\n help='proportion of data for testing, default: 0.2')\n parser.add_argument('-s', '--seed', type=int, default=None, help='seed '\n 'used for consistent data splitting, default: None')\n parser.add_argument('-a', '--data_augmentation', action='store_true',\n help='switch to augment the images')\n drop_info_fns = ['cutout', 'gridmask', 'None'] # info dropping algorithms\n parser.add_argument('-d', '--drop_info_fn', type=str, choices=drop_info_fns,\n default=None, help='whether cutout, GridMask, or no '\n 'information dropping algorithm is used, default: None')\n parser.add_argument('-c', '--class_balancing', action='store_true',\n help='switch to perform class balancing')\n parser.add_argument('-b', '--batch_size', type=int, default=32,\n help='batch size of DataLoader objects, default: 32')\n parser.add_argument('-l', '--label_smoothing', action='store_true',\n 
help='switch to use soft targets in loss computation')\n parser.add_argument('-e', '--epochs', type=int, default=50, help='number '\n 'of iterations over training data, default: 50')\n parser.add_argument('-m', '--model', type=str, default='model.pt',\n help='path to save trained model, default: \"model.pt\"')\n\n # parse and print arguments\n args = parser.parse_args()\n for arg in vars(args):\n print(f'{arg.upper()}: {getattr(args, arg)}')\n\n return args", "def main():\n args = load_args()\n\n perturbation_file = args.perturbation_file\n vm_params = load_yaml(args.vm_params_location)\n processes = args.n_processes\n verbose = args.verbose\n\n if args.perturbation:\n if args.model:\n perturbation_model = pd.read_csv(args.model)\n generate_velocity_model_perturbation_file_from_model(\n vm_params, perturbation_model, perturbation_file, processes, verbose\n )\n elif args.parameter_file:\n common_params, layer_params = load_parameter_file(args.parameter_file)\n generate_velocity_model_perturbation_file_from_config(\n common_params, layer_params, perturbation_file, processes, verbose\n )\n else:\n create_constant_vm_file(\n perturbation_file, vm_params[\"nx\"] * vm_params[\"ny\"] * vm_params[\"nz\"]\n )\n\n if args.fault_damage_zone:\n apply_fault_damage_zone(\n srf_location=args.srf_location,\n vm_params=vm_params,\n pert_f_location=perturbation_file,\n depth_km=args.depth_km,\n max_depth_km=args.max_depth_km,\n width_km=args.width_km,\n max_width_km=args.max_width_km,\n min_damage_velocity=args.max_velocity_drop,\n n_processes=processes,\n )", "def process_args():\n\n parser = argparse.ArgumentParser()\n\n # argument group for parameters related to input/output\n # (e.g. filenames, logging/verbosity options, target genes)\n #\n # these don't affect the model output, and thus don't need to be saved\n # with the results of the experiment\n io = parser.add_argument_group('io',\n 'arguments related to script input/output, '\n 'note these will *not* be saved in metadata ')\n io.add_argument('--custom_genes', nargs='*', default=None,\n help='currently this needs to be a subset of top_50')\n io.add_argument('--gene_set', type=str,\n choices=['top_50', 'vogelstein', 'custom'],\n default='top_50',\n help='choose which gene set to use. top_50 and vogelstein are '\n 'predefined gene sets (see data_utilities), and custom allows '\n 'any gene or set of genes in TCGA, specified in --custom_genes')\n io.add_argument('--log_file', default=None,\n help='name of file to log skipped genes to')\n io.add_argument('--results_dir', default=cfg.results_dirs['multimodal'],\n help='where to write results to')\n io.add_argument('--verbose', action='store_true')\n\n # argument group for parameters related to model training/evaluation\n # (e.g. 
model hyperparameters, preprocessing options)\n #\n # these affect the output of the model, so we want to save them in the\n # same directory as the experiment results\n opts = parser.add_argument_group('model_options',\n 'parameters for training/evaluating model, '\n 'these will affect output and are saved as '\n 'experiment metadata ')\n opts.add_argument('--debug', action='store_true',\n help='use subset of data for fast debugging')\n opts.add_argument('--n_dim', nargs='*', default=None,\n help='list of compressed dimensions to use, defaults to '\n 'uncompressed data for all data types')\n opts.add_argument('--num_folds', type=int, default=4,\n help='number of folds of cross-validation to run')\n opts.add_argument('--overlap_data_types', nargs='*',\n default=['expression'],\n help='data types to define set of samples to use; e.g. '\n 'set of data types for a model comparison, use only '\n 'overlapping samples from these data types')\n opts.add_argument('--seed', type=int, default=cfg.default_seed)\n opts.add_argument('--subset_mad_genes', type=int, default=cfg.num_features_raw,\n help='if included, subset gene features to this number of '\n 'features having highest mean absolute deviation')\n opts.add_argument('--training_data', nargs='*', default=['expression'],\n help='which data types to train model on')\n\n args = parser.parse_args()\n\n args.results_dir = Path(args.results_dir).resolve()\n\n if args.log_file is None:\n args.log_file = Path(args.results_dir, 'log_skipped.tsv').resolve()\n\n if args.gene_set == 'custom':\n if args.custom_genes is None:\n parser.error('must include --custom_genes when --gene_set=\\'custom\\'')\n args.gene_set = args.custom_genes\n del args.custom_genes\n elif (args.gene_set != 'custom' and args.custom_genes is not None):\n parser.error('must use option --gene_set=\\'custom\\' if custom genes are included')\n\n # check that all training data types are defined in config\n if (len(set(args.training_data).intersection(set(cfg.data_types.keys()))) !=\n len(set(args.training_data))):\n parser.error('training_data data types must be in config.data_types')\n\n # check that all data types in overlap_data_types are valid\n #\n # here I'm just checking this argument against the non-compressed data types,\n # downstream code will check if data types we request compressed data for\n # really have compressed data, but don't need to catch that here\n check_all_data_types(parser, args.overlap_data_types, args.debug)\n\n # split args into defined argument groups, since we'll use them differently\n arg_groups = du.split_argument_groups(args, parser)\n io_args, model_options = arg_groups['io'], arg_groups['model_options']\n\n # if no n_dim argument provided, set all to None\n if model_options.n_dim is None:\n model_options.n_dim = [None] * len(model_options.training_data)\n else:\n # convert None strings from argparse to python Nones\n model_options.n_dim = (\n [None if n == 'None' else n for n in model_options.n_dim]\n )\n\n # add some additional hyperparameters/ranges from config file to model options\n # these shouldn't be changed by the user, so they aren't added as arguments\n model_options.alphas = cfg.alphas\n model_options.l1_ratios = cfg.l1_ratios\n\n # for these experiments, we need to standardize all data types that are not\n # already PCA compressed\n model_options.standardize_data_types = (\n [t for ix, t in enumerate(model_options.training_data)\n if model_options.n_dim[ix] == None]\n )\n\n return io_args, model_options", "def train_naive(): # add 
arguments as needed\n pass", "def add_args(parser):\n NATransformerModel.add_args(parser)\n parser.add_argument('--share-encoder-embeddings', action='store_true',\n help='share encoder embeddings across languages')\n parser.add_argument('--share-decoder-embeddings', action='store_true',\n help='share decoder embeddings across languages')\n parser.add_argument('--share-encoders', action='store_true',\n help='share encoders across languages')\n parser.add_argument('--student-arch', default=\"nonautoregressive_transformer\",\n help='determine the type of student network to mutual learn from.') \n parser.add_argument('--teacher-arch', default=\"transformer\",\n help='determine the type of teacher network to mutual learn from.')\n\n parser.add_argument('--load-to-teacher', action='store_true',\n help='load checkpoint to teacher network.')\n parser.add_argument('--freeze-teacher', action='store_true',\n help='whether to freeze teacher.')\n\n parser.add_argument(\"--student-kd-factor\",\n default=.5,\n type=float,\n help=\"weights on the knowledge distillation loss for training student\"\n )\n parser.add_argument(\"--teacher-kd-factor\",\n default=.5,\n type=float,\n help=\"weights on the knowledge distillation loss for training teacher\"\n )\n parser.add_argument(\"--control-kd-factor\", action=\"store_true\",\n help=\"use the PI algorithm introduced in ControlVAE to calculate the weight on KL-divergence on latent.\")\n parser.add_argument('--control-kd-args', type=str, metavar='JSON',\n help=\"\"\"args for ControlVAE, a valid setup is: '{\"v_kl\": 3.0, \"Kp\": 0.01, \"Ki\": 0.0001, \"beta_min\": 0.0, \"beta_max\": 1.0 }' \"\"\")\n\n\n # inference flags\n parser.add_argument('--reduce-to-student', action='store_true',\n help='when inference, only load student network.')\n parser.add_argument('--reduce-to-teacher', action='store_true',\n help='when inference, only load teacher network.')", "def main(args):\n \n ## Load & Preprocess data \n if args.data_name == 'amsterdam': \n file_name = '../data/amsterdam/test_longitudinal_data.csv'\n ori_data = data_preprocess(file_name, args.max_seq_len)\n \n # Divide the data into training and testing\n divided_data, _ = data_division(ori_data, seed = args.seed, divide_rates = [args.train_rate, 1-args.train_rate])\n \n train_data = np.asarray(divided_data[0])\n test_data = np.asarray(divided_data[1])\n\n print('Finish data loading: ' + str(args.data_name)) \n \n ## Run hider algorithm\n if args.hider_model == 'timegan':\n generated_data = timegan.timegan(train_data)\n elif args.hider_model == 'add_noise':\n generated_data = add_noise.add_noise(train_data, args.noise_size) \n print('Finish hider algorithm training') \n \n ## Define enlarge data and its labels\n enlarge_data = np.concatenate((train_data, test_data), axis = 0)\n enlarge_data_label = np.concatenate((np.ones([train_data.shape[0],]), np.zeros([test_data.shape[0],])), axis = 0)\n \n # Mix the order\n idx = np.random.permutation(enlarge_data.shape[0])\n enlarge_data = enlarge_data[idx]\n enlarge_data_label = enlarge_data_label[idx]\n \n ## Run seeker algorithm\n reidentified_data = knn_seeker(generated_data, enlarge_data)\n \n print('Finish seeker algorithm training') \n \n ## Evaluate the performance\n # 1. 
Feature prediction\n feat_idx = np.random.permutation(train_data.shape[2])[:args.feature_prediction_no]\n ori_feat_pred_perf = feature_prediction(train_data, test_data, feat_idx)\n new_feat_pred_perf = feature_prediction(generated_data, test_data, feat_idx)\n \n feat_pred = [ori_feat_pred_perf, new_feat_pred_perf]\n \n print('Feature prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_feat_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_feat_pred_perf, 4)))\n \n # 2. One step ahead prediction\n ori_step_ahead_pred_perf = one_step_ahead_prediction(train_data, test_data)\n new_step_ahead_pred_perf = one_step_ahead_prediction(generated_data, test_data)\n \n step_ahead_pred = [ori_step_ahead_pred_perf, new_step_ahead_pred_perf]\n \n print('One step ahead prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_step_ahead_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_step_ahead_pred_perf, 4)))\n \n # 3. Reidentification score\n reidentification_score = reidentify_score(enlarge_data_label, reidentified_data)\n \n print('Reidentification score: ' + str(np.round(reidentification_score, 4)))\n \n shutil.rmtree('tmp')\n \n return feat_pred, step_ahead_pred, reidentification_score", "def main(args):\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = LightningTemplateModel(**vars(args))\n\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = Trainer.from_argparse_args(args)\n\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)", "def main_train(args):\n if args.preprocessing is not None:\n raise NotImplementedError(\"ASV Preprocessing is not yet supported\")\n if args.output is None and args.snapshot_rate is not None:\n raise RuntimeError(\"Can not save snapshots without output\")\n\n # Create and load cm network\n cm_model, cm_feature_network = create_cm_network(args)\n cm_state_dict = torch.load(args.cm_model)\n # Remove the bonafide_average vector stored with\n # CM models\n average_bonafide = cm_state_dict.pop(\"average_bonafide\")\n # Do not load parameters if so desired\n if not args.from_scratch:\n cm_model.load_state_dict(cm_state_dict)\n # Get CM embedding size we are about to feed to ASV system\n cm_embedding_size = average_bonafide.shape[0]\n\n # Create and load ASV network\n asv_model, asv_feature_network = create_asv_network(XVECTOR_FEATURE_SIZE, args)\n asv_state_dict = torch.load(args.asv_model)\n # Remove any preprocessing steps\n preprocessing_parameters = {\n \"centering_mean\": asv_state_dict.pop(\"centering_mean\", None),\n \"lda\": asv_state_dict.pop(\"lda\", None)\n }\n if not args.from_scratch:\n asv_model.load_state_dict(asv_state_dict)\n\n asv_features, cm_features, speaker_labels, is_targets, is_spoofs = load_joint_train_data_asvspoof(\n args.joint_filelist, args.joint_asv_directory, args.joint_cm_directory,\n )\n\n # Move CM features to cuda already\n numpy_to_torch_cuda = lambda arr: torch.from_numpy(arr).float().cuda()\n cm_features = list(map(numpy_to_torch_cuda, cm_features))\n\n # Split bonafide spoof samples to a separate list for\n # saving CM model\n bonafide_cm_features = [cm_features[i] for i in range(len(cm_features)) if not is_spoofs[i]]\n\n # Preprocess data a little bit: Create a dictionary\n # that maps speaker labels to indeces where speaker has\n # bona fide samples\n speaker_label_to_indeces = {}\n for i, (speaker_label, is_target) in enumerate(zip(speaker_labels, is_targets)):\n if is_target:\n # Speaker label and 
input features match,\n # add this index to list of valid samples for that\n # speaker\n speaker_label_to_indeces[speaker_label] = (\n speaker_label_to_indeces.get(speaker_label, []) + [i]\n )\n\n # Also separate indeces of nontarget/target trials\n is_targets = np.array(is_targets)\n target_indeces = np.where(is_targets)[0]\n nontarget_indeces = np.where(~is_targets)[0]\n\n all_parameters = list(asv_model.parameters())\n all_parameters.extend(list(cm_model.parameters()))\n # SGD is stabler than Adam, but need to see how many epochs it takes to train\n optimizer = None\n if args.optimizer == \"adam\":\n optimizer = torch.optim.Adam(all_parameters, weight_decay=args.l2_weight, lr=args.lr)\n elif args.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(all_parameters, weight_decay=args.l2_weight, lr=args.lr)\n\n num_samples = len(asv_features)\n total_iterations = int(args.epochs * (num_samples // BATCH_SIZE))\n\n progress_bar = tqdm(total=total_iterations)\n loss_deque = deque(maxlen=100)\n\n loss_function = LOSS_FUNCTIONS[args.loss]\n\n for update_i in range(total_iterations):\n (cm_inputs, asv_inputs), targets, is_spoof = create_joint_sample_batch(\n asv_features,\n cm_features,\n speaker_label_to_indeces,\n target_indeces,\n nontarget_indeces,\n is_spoofs,\n BATCH_SIZE\n )\n\n asv_predictions = asv_model(*asv_inputs)\n cm_predictions = cm_model(*cm_inputs)\n\n # Predictions are logits, turn into probablities\n asv_predictions = torch.sigmoid(asv_predictions)\n cm_predictions = torch.sigmoid(cm_predictions)\n\n # Loss should return a scalar value we then backprop through\n # and update parameters to minimize it\n loss = loss_function(asv_predictions, cm_predictions, targets, is_spoof, args)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss_deque.append(loss.item())\n average_loss = np.mean(loss_deque)\n progress_bar.update(1)\n progress_bar.set_description(\"Loss: {:2.5f}\".format(average_loss))\n progress_bar.refresh()\n\n if args.snapshot_rate is not None and (update_i % args.snapshot_rate) == 0:\n save_models(\n args.output + \"_updates_{}\".format(update_i),\n asv_model,\n preprocessing_parameters,\n cm_feature_network,\n cm_model,\n bonafide_cm_features\n )\n\n\n if args.output is not None:\n save_models(\n args.output,\n asv_model,\n preprocessing_parameters,\n cm_feature_network,\n cm_model,\n bonafide_cm_features\n )", "def run(prog, args):\n\t#parse arguments\n\tparser = ArgumentParser(prog = prog, usage=\"%(prog)s [-options] [input] [output]\\n\", version=\"%(prog)s v0.1.1\")\n\tparser.add_argument(\"--exclude\", type = str, dest=\"exclude\", help=\"exclude mtDNA sites from analysis\")\n\tparser.add_argument(\"--remove\", type = str, dest=\"remove\", help=\"remove families from analysis\")\n\tparser.add_argument(\"--keep\", type = str, dest=\"keep\", help=\"keep only the families for analysis\")\n\tparser.add_argument(\"--depth\", type = float, default = 10, dest=\"depth\", help=\"the minimum read depth of all variants\")\n\tparser.add_argument(\"--depth-min\", type = float, default = 40, dest=\"depth_min\", help=\"the minimum read depth of heteroplasmies\")\n\tparser.add_argument(\"--hq-min\", type = float, default = 0.7, dest=\"hq_min\", help=\"the minimum ratio of high-quality reads of heteroplasmies\")\n\tparser.add_argument(\"--llr-min\", type = float, default = 5, dest=\"llr_min\", help=\"the minimum quality score of heteroplasmies\")\n\tparser.add_argument(\"--sbias-min\", type = float, default = 0.001, dest=\"sbias_min\", help=\"the minimum P value 
for strand bias analysis of heteroplasmies\")\n\tparser.add_argument(\"--frac-min\", type = float, default = 0.01, dest=\"frac_min\", help=\"the minimum minor allele fraction of heteroplasmies\")\n\tparser.add_argument(\"--dev-frac-min\", type = float, default = 0.90, dest=\"dev_frac_min\", help=\"the minimum variant allele fraction of homoplasmies\")\n\tparser.add_argument(\"--annotate\", type = str, dest=\"annotate\", help=\"annotate variants according to the file specified\")\n\tparser.add_argument(\"--output-ped\", default=False, action=\"store_true\", dest=\"output_ped\", help=\"output the variants detected to a ped file\")\n\tparser.add_argument(\"--output-hsd\", default=False, action=\"store_true\", dest=\"output_hsd\", help=\"output major allele to the hsd file\")\n\tparser.add_argument(\"--output-minor-hsd\", default=False, action=\"store_true\", dest=\"output_minor_hsd\", help=\"output minor allele to the hsd file\")\n\tparser.add_argument(\"input\", help=\"the variant file output from scan\")\n\tparser.add_argument(\"output\", help=\"the prefix of output files\")\n\toptions = parser.parse_args(args)\n\t\n\t#initialize global variables\n\tglobal llr_min, sbias_min\n\tllr_min = options.llr_min\n\tsbias_min = options.sbias_min\n\t\n\tpos_excl = {}\n\tif (options.exclude):\n\t\twith open(options.exclude) as fh:\n\t\t\tfor line in fh:\n\t\t\t\tline = line.strip()\n\t\t\t\ttry:\n\t\t\t\t\tpos_excl[int(line)] = 1\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\tfam_excl = {}\n\tif (options.remove):\n\t\twith open(options.remove) as fh:\n\t\t\tfor line in fh:\n\t\t\t\tline = line.strip()\n\t\t\t\ttry:\n\t\t\t\t\tfam_excl[int(line)] = 1\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\thead, data = readMtVariant(options.input, fam_excl, pos_excl)\n\t#head = \"family\\tsample\\tchr\\tpos\\tref\\tdepth\\tdepth_fwd\\tdepth_rev\\tallele\\tA\\tT\\tC\\tG\\ta\\tt\\tc\\tg\\theteroplasmy\\tsubstitution\\thet_allele\\thet_freq\\thet_freq_mle\\thet_freq_llr\\thet_low\\thet_high\\thet_p_fisher\\thet_p_sbias\".split(\"\\t\")\n\thead.append(\"stat\")\n\t\n\tannot = {}\n\tannot_len = 0\n\tif (options.annotate):\n\t\t#read annotation file\n\t\t#build annotation table\n\t\tif (options.annotate.endswith(\".csv\")):\n\t\t\tdelim = \",\"\n\t\telse:\n\t\t\tdelim = \"\\t\"\n\t\twith open(options.annotate, \"r\") as fh:\n\t\t\tline = fh.readline()\n\t\t\tline = line.rstrip(\"\\r\\n\").split(delim)\n\t\t\tn = line.index(\"id\")\n\t\t\thead.extend(line[n+1:])\n\t\t\tannot_len = len(line) - n - 1\n\t\t\tfor line in fh:\n\t\t\t\tline = line.rstrip(\"\\r\\n\").split(delim)\n\t\t\t\tif (not line):\n\t\t\t\t\tcontinue\n\t\t\t\tid = line[n]\n\t\t\t\tannot[id] = line[n+1:]\n\tannot_null = [\"\",]*annot_len\n\t\n\tif (options.output_hsd):\n\t\tout_hsd = open(options.output + \".qc.hsd\", \"wb\")\n\t\tout_hsd.write(\"SampleId\\tRange\\tHaplogroup\\tPolymorphisms (delimited with tabs)\\n\")\n\t\tif (options.output_minor_hsd):\n\t\t\tout_minor_hsd = open(options.output + \"minor.qc.hsd\", \"wb\")\n\t\t\tout_minor_hsd.write(\"SampleId\\tRange\\tHaplogroup\\tPolymorphisms (delimited with tabs)\\n\")\n\t\telse:\n\t\t\tout_minor_hsd = None\n\telse:\n\t\tout_hsd = None\n\t\tout_minor_hsd = None\n\t\n\tvar_all = getMtVariant(data, depth_min = 0, depth_ratio_min = 0)\n\tsample_all = []\n\tif (options.keep):\n\t\twith open(options.keep, \"rb\") as fh:\n\t\t\tfor line in fh:\n\t\t\t\tline = line.rstrip(\"\\r\\n\")\n\t\t\t\tif (not line):\n\t\t\t\t\tcontinue\n\t\t\t\tfamily, sample = line.split(\"\\t\")\n\t\t\t\tif (family not in 
fam_excl):\n\t\t\t\t\tsample_all.append([family, sample])\n\telse:\n\t\tfor family in sorted(var_all.keys()):\n\t\t\tfor sample in sorted(var_all[family].keys()):\n\t\t\t\tsample_all.append([family, sample])\n\t\n\t#output sample names\n\t#order corresponds to that of the samples in the ped file and the hsd file\n\twith open(options.output + \".qc.tfam\", \"wb\") as out_fam:\n\t\tfor family, sample in sample_all:\n\t\t\t#use the default phenotype value -9\n\t\t\tout_fam.write(\"\\t\".join([family, sample, \"0\", \"0\", \"-9\", \"-9\"])+\"\\n\")\n\t\n\tsites_all = {}\n\twith open(options.output + \".qc.annot\", \"wb\") as out:\n\t\t#output the head line\n\t\tout.write(\"\\t\".join(head) + \"\\n\")\n\t\tidx = 0 #sample idx\n\t\tfor family, sample in sample_all:\n\t\t\tif (family in var_all and sample in var_all[family]):\n\t\t\t\tvar = var_all[family][sample]\n\t\t\telse:\n\t\t\t\tvar = {}\n\t\t\thomoplasmy = []\n\t\t\theteroplasmy = []\n\t\t\tfor pos in sorted(var.keys()):\n\t\t\t\tv = var[pos]\n\t\t\t\tif (v):\n\t\t\t\t\tif (isHeteroplasmy(v, depth_min = options.depth_min, depth_strand = 0, depth_ratio_min = options.hq_min, freq_min = options.frac_min)):\n\t\t\t\t\t\tstat = \"heteroplasmy\"\n\t\t\t\t\t\theteroplasmy.append(v)\n\t\t\t\t\t\tadd_var = True\n\t\t\t\t\t\ta1 = v.allele\n\t\t\t\t\t\ta2 = v.alt_allele\n\t\t\t\t\telif (v.depth >= options.depth and v.allele != v.ref and v.dev_freq >= options.dev_frac_min):\n\t\t\t\t\t\tstat = \"homoplasmy\"\n\t\t\t\t\t\thomoplasmy.append(v)\n\t\t\t\t\t\tadd_var = True\n\t\t\t\t\t\ta1 = a2 = v.allele\n\t\t\t\t\telif (v.alt_freq_raw > options.frac_min):\n\t\t\t\t\t\t#variant does not pass the filters of variant quality and strand bias (see MTVariant)\n\t\t\t\t\t\tstat = \"heteroplasmy possible\"\n\t\t\t\t\t\tadd_var = False\n\t\t\t\t\t\ta1 = a2 = None\n\t\t\t\t\telse:\n\t\t\t\t\t\tstat = \"unkown\"\n\t\t\t\t\t\tadd_var = False\n\t\t\t\t\t\ta1 = a2 = None\n\t\t\t\t\tout.write(\"\\t\".join(v.line_cache + [stat,] + annot.get(v.id,annot_null))+\"\\n\")\n\t\t\t\telse:\n\t\t\t\t\tstat = \"unkown\"\n\t\t\t\t\tadd_var = True\n\t\t\t\t\ta1 = a2 = \"N\"\n\t\t\t\tif (add_var):\n\t\t\t\t\tif (pos not in sites_all):\n\t\t\t\t\t\tsites_all[pos] = [0, 0, 0, {}] ##homoplamy, #heteroplasmy, #missing, #{sample: allele}\n\t\t\t\t\tsite = sites_all[pos]\n\t\t\t\t\tsite[3][idx]= a1+\"\\t\"+a2\n\t\t\t\t\tif (a1 != a2):\n\t\t\t\t\t\tsite[1] += 1\n\t\t\t\t\telif (a2 == \"N\"):\n\t\t\t\t\t\tsite[2] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tsite[0] += 1\n\t\t\tidx += 1\n\t\t\tif (out_hsd):\n\t\t\t\t#use sample index (one-based) instead of the real sample name\n\t\t\t\t#output major alleles\n\t\t\t\tmajor_allele = [[v.pos,str(v.allele)] for v in homoplasmy] + [[v.pos,str(v.allele)] for v in heteroplasmy if v.dev_freq >= options.dev_frac_min]\n\t\t\t\tif (not major_allele):\n\t\t\t\t\tmajor_allele = [\"1G\",]\n\t\t\t\telse:\n\t\t\t\t\tmajor_allele.sort()\n\t\t\t\t\tmajor_allele = [str(p)+str(a) for p, a in major_allele]\n\t\t\t\tout_hsd.write(\"\\t\".join([str(idx),\"1-16569;\",\"?\"] + major_allele)+\"\\n\")\n\t\t\t\tif (out_minor_hsd):\n\t\t\t\t\tminor_allele = [[v.pos,str(v.alt_allele)] for v in heteroplasmy]\n\t\t\t\t\tif (minor_allele):\n\t\t\t\t\t\tminor_allele.sort()\n\t\t\t\t\t\tminor_allele = [str(p)+str(a) for p, a in minor_allele]\n\t\t\t\t\tout_minor_hsd.write(\"\\t\".join([str(idx),\"1-16569;\",\"?\"] + minor_allele)+\"\\n\")\n\n\tif (out_hsd):\n\t\tout_hsd.close()\n\t\tif (out_minor_hsd):\n\t\t\tout_minor_hsd.close()\n\t\n\tif (options.output_ped):\n\t\tsites = 
sorted(sites_all.keys())\n\t\tout_ped = open(options.output + \".qc.tped\", \"wb\")\n\t\tout_map = open(options.output + \".qc.map\", \"wb\")\n\t\tfor i in sites:\n\t\t\tsite = sites_all[i]\n\t\t\tout_ped.write(\"\\t\".join([\"26\",str(i)+reference_seq[i],\"0\",str(i)]))\n\t\t\tout_ped.write(\"\\t\")\n\t\t\tsite_sample = site[3]\n\t\t\tref = reference_seq[i]+\"\\t\"+reference_seq[i]\n\t\t\tout_ped.write(\"\\t\".join([site_sample.get(j,ref) for j in range(len(sample_all))]))\n\t\t\tout_ped.write(\"\\n\")\n\t\t\tout_map.write(\"\\t\".join([\"26\",str(i)+reference_seq[i],\"0\",str(i)]+list(map(str,site[:3])))+\"\\n\")\n\t\"\"\"\n\tas plink does not handle multi-allelic variants\n\tto remove these variants in R\n\tnallele <- apply(tped[,5:ncol(tped)],1,function(x){length(unique(x))})\n\twrite.table(tped[nallele==2,], \"biallele.tped\", sep=\"\\t\", quote = F,col.names = F, row.names = F)\n\t\"\"\"", "def main(**kwargs):\n\n wrap_predict(kwargs['input'],\n kwargs['output'],\n kwargs['pipeline'])", "def main(args):\n bad_words_file = codecs.open(args.language + \"/feature_files/bad_words\", \"r\", \"utf-8\").readlines()\n bad_words = read_known_words(bad_words_file)\n \n good_words_file = codecs.open(args.language + \"/feature_files/good_words\", \"r\", \"utf-8\").readlines()\n good_words = read_known_words(good_words_file)\n\n curse_words_file = codecs.open(args.language + \"/feature_files/curse_words\", \"r\", \"utf-8\").readlines()\n curse_words = read_known_words(curse_words_file)\n\n prepositions_file = codecs.open(args.language + \"/feature_files/prepositions\", \"r\", \"utf-8\").readlines()\n prepositions = read_known_words(prepositions_file)\n\n determiners_file = codecs.open(args.language + \"/feature_files/determiners\", \"r\", \"utf-8\").readlines()\n determiners = read_known_words(determiners_file)\n\n syllables_file = codecs.open(args.language + \"/feature_files/syllables\", \"r\", \"utf-8\").readlines()\n syllable_structure = read_syllables_file(syllables_file)\n\n other_feature_files = glob.glob(args.language + \"/feature_files/*.txt\")\n other_features = set_features_from_files(other_feature_files)\n \n ermaObj = ConllToErma(args, bad_words, good_words, curse_words, prepositions, \\\n determiners, syllable_structure, other_features)\n\n if not args.just_test:\n # Input training file.\n train_id = open(args.train, \"r\")\n train = train_id.readlines()\n train_id.close()\n sys.stdout.write(\"Reading training file...\\n\")\n (train_features, train_skip_chains) = ermaObj.read_conll_file(train)\n sys.stdout.write(\"Building model...\\n\")\n train_hash = ermaObj.make_nodes(train_features)\n # Freeze the known features based on what's seen in the training data\n ermaObj.cutoff_features()\n else:\n train_hash = {}\n train_skip_chains = {}\n # Input testing file.\n test_id = open(args.test, \"r\")\n test = test_id.readlines()\n test_id.close()\n sys.stdout.write(\"Reading test file...\\n\")\n (test_features, test_skip_chains) = ermaObj.read_conll_file(test)\n sys.stdout.write(\"Building model...\\n\")\n test_hash = ermaObj.make_nodes(test_features, test=True)\n ermaObj.write_out(train_hash, train_skip_chains, test_hash, test_skip_chains)", "def main(args: List[str]):\n argv = {a.split('=')[0]: a.split('=')[1] for a in args[1:]}\n\n load_path = argv.get('load_path', None)\n assert load_path, \"No load_path specified\"\n\n batch_size = int(argv.get('batch_size', 128))\n\n device = argv.get('device', None)\n\n text_path = argv.get('text_path', None)\n\n hashtag_analysis = argv.get('hashtag', 
'true').lower()\n assert hashtag_analysis in ['true', 'false']\n hashtag_analysis = False if 'f' in hashtag_analysis else True\n\n fast_tokenizer = argv.get('fast_tokenizer', 'false').lower()\n assert fast_tokenizer in ['true', 'false']\n fast_tokenizer = False if 'f' in fast_tokenizer else True\n\n if text_path is None:\n data_path = get_data_path()\n _text_path = Path(data_path, 'test_data.txt')\n if _text_path.is_file():\n text_path = _text_path\n else:\n logger.error(\"No text_path specified\")\n exit(0)\n\n logger.info(f\"Predicting sentiment from data inside {text_path}\")\n\n if not hashtag_analysis:\n trans_predict = TransformersPredict(load_path=load_path, text_path=text_path, device=device,\n fast_tokenizer=fast_tokenizer)\n else:\n freq_threshold = int(argv.get('hashtag_freq', 500))\n prob_threshold = float(argv.get('hashtag_prob', 0.7))\n trans_predict = TransformersPredictWithHashtag(load_path=load_path, text_path=text_path, device=device,\n fast_tokenizer=fast_tokenizer,\n freq_threshold=freq_threshold,\n prob_threshold=prob_threshold)\n trans_predict.predict(batch_size=batch_size)\n trans_predict.submissionToFile()", "def add_train_args(parser):\n\n # Runtime environment\n runtime = parser.add_argument_group('Environment')\n runtime.add_argument('--dataset', type=str, default=\"searchqa\",\n help='Dataset: searchqa, quasart or unftriviaqa')\n runtime.add_argument('--base_dir', type=str, default=\".\",\n help='base_dir of the pre-processing')", "def config_and_train(self, sys_args):\n \n self.run_config_function(sys_args)\n self.set_model_name('vgg_16')\n self.set_trainable_and_exclude_scopes(constants.checkpoint_exclude_scopes,\n constants.trainable_scopes)\n self.set_optimizer('sgd')\n self.set_max_number_of_steps(6000)\n self.train_or_eval_net(sys_args)", "def uq_ensemble(config=\"dummy_test\", script=\"ERROR: PARAMETER script SHOULD BE DEFINED FOR TASK UQ_ENSEMBLE\",**args):\n \n path_to_config = find_config_file_path(config)\n sweep_dir = path_to_config + \"/SWEEP\"\n env.script = script\n\n run_ensemble(config, sweep_dir, **args)", "def run(args=None):\n parser = OptionParser(description='Shows how to use different IK solutions for arms with few joints.')\n OpenRAVEGlobalArguments.addOptions(parser)\n parser.add_option('--scene',action=\"store\",type='string',dest='scene',default='tridoftable.env.xml',\n help='Scene file to load (default=%default)')\n parser.add_option('--manipname',action=\"store\",type='string',dest='manipname',default=None,\n help='name of manipulator to use (default=%default)')\n (options, leftargs) = parser.parse_args(args=args)\n OpenRAVEGlobalArguments.parseAndCreateThreadedUser(options,main,defaultviewer=True)", "def main():\r\n\r\n option_parser, opts, args = parse_command_line_parameters(**script_info)\r\n\r\n # additional option checks\r\n if opts.chimera_detection_method == 'blast_fragments':\r\n if not (opts.blast_db or opts.reference_seqs_fp):\r\n option_parser.error('Must provide either --blast_db or' +\r\n ' --reference_seqs_fp and --id_to_taxonomy_fp when' +\r\n ' method is blast_fragments.')\r\n if not opts.id_to_taxonomy_fp:\r\n option_parser.error('Must provide --id_to_taxonomy_fp when method' +\r\n ' is blast_fragments.')\r\n if opts.num_fragments < 2:\r\n option_parser.error('Invalid number of fragments (-n %d) Must be >= 2.'\r\n % opts.num_fragments)\r\n elif opts.chimera_detection_method == 'ChimeraSlayer':\r\n if not opts.aligned_reference_seqs_fp:\r\n option_parser.error(\"Must provide --aligned_reference_seqs_fp \"\r\n \"when 
using method ChimeraSlayer\")\r\n elif opts.chimera_detection_method == 'usearch61':\r\n if opts.suppress_usearch61_ref and opts.suppress_usearch61_denovo:\r\n option_parser.error(\"Supressing both de novo and reference \"\r\n \"chimera detection not allowed.\")\r\n if not opts.reference_seqs_fp and not opts.suppress_usearch61_ref:\r\n option_parser.error(\"--reference_seqs_fp required for reference \"\r\n \"based chimera detection, suppress reference based chimera \"\r\n \"detection with --suppress_usearch61_ref\")\r\n if opts.reference_seqs_fp:\r\n try:\r\n temp_f = open(opts.reference_seqs_fp, \"U\")\r\n temp_f.close()\r\n except IOError:\r\n raise IOError(\"Unable to open --reference_seqs_fp, please \"\r\n \"check filepath and permissions.\")\r\n if opts.non_chimeras_retention not in ['intersection', 'union']:\r\n option_parser.error(\"--non_chimeras_retention must be either \"\r\n \"'union' or 'intersection'\")\r\n if opts.usearch61_xn <= 1:\r\n option_parser.error(\"--usearch61_xn must be > 1\")\r\n if opts.usearch61_dn <= 0:\r\n option_parser.error(\"--usearch61_dn must be > 0\")\r\n if opts.usearch61_mindiffs <= 0:\r\n option_parser.error(\"--usearch61_mindiffs must be > 0\")\r\n if opts.usearch61_mindiv <= 0:\r\n option_parser.error(\"--usearch61_mindiv must be > 0\")\r\n if opts.usearch61_abundance_skew <= 0:\r\n option_parser.error(\"--usearch61_abundance_skew must be > 0\")\r\n\r\n verbose = opts.verbose # not used yet ...\r\n input_seqs_fp = opts.input_fasta_fp\r\n id_to_taxonomy_fp = opts.id_to_taxonomy_fp\r\n reference_seqs_fp = opts.reference_seqs_fp\r\n chimera_detection_method = opts.chimera_detection_method\r\n num_fragments = opts.num_fragments\r\n output_fp = opts.output_fp\r\n taxonomy_depth = opts.taxonomy_depth\r\n max_e_value = opts.max_e_value\r\n blast_db = opts.blast_db\r\n keep_intermediates = opts.keep_intermediates\r\n threads = opts.threads\r\n\r\n # calculate threads as 1 per CPU, or use float of input value\r\n if threads == 'one_per_cpu':\r\n threads = float(1 / cpu_count())\r\n else:\r\n # Make sure input is a float\r\n try:\r\n threads = float(threads)\r\n except ValueError:\r\n option_parser.error(\"--threads must be a float value if \"\r\n \"default 'one_per_cpu' value overridden.\")\r\n\r\n if not output_fp:\r\n if chimera_detection_method == \"usearch61\":\r\n output_dir = \"usearch61_chimeras/\"\r\n create_dir(output_dir, fail_on_exist=False)\r\n else:\r\n input_basename = splitext(split(input_seqs_fp)[1])[0]\r\n output_fp = '%s_chimeric.txt' % input_basename\r\n elif chimera_detection_method == \"usearch61\":\r\n output_dir = output_fp\r\n create_dir(output_dir, fail_on_exist=False)\r\n\r\n if chimera_detection_method == 'blast_fragments':\r\n blast_fragments_identify_chimeras(input_seqs_fp,\r\n id_to_taxonomy_fp,\r\n reference_seqs_fp, blast_db=blast_db,\r\n num_fragments=opts.num_fragments,\r\n max_e_value=max_e_value,\r\n output_fp=output_fp,\r\n taxonomy_depth=taxonomy_depth)\r\n elif chimera_detection_method == 'ChimeraSlayer':\r\n chimeraSlayer_identify_chimeras(input_seqs_fp,\r\n output_fp=output_fp,\r\n db_FASTA_fp=opts.reference_seqs_fp,\r\n db_NAST_fp=opts.aligned_reference_seqs_fp,\r\n min_div_ratio=opts.min_div_ratio,\r\n keep_intermediates=keep_intermediates)\r\n elif chimera_detection_method == 'usearch61':\r\n usearch61_chimera_check(input_seqs_fp,\r\n output_dir=output_dir,\r\n reference_seqs_fp=reference_seqs_fp,\r\n suppress_usearch61_intermediates=opts.suppress_usearch61_intermediates,\r\n 
suppress_usearch61_ref=opts.suppress_usearch61_ref,\r\n suppress_usearch61_denovo=opts.suppress_usearch61_denovo,\r\n split_by_sampleid=opts.split_by_sampleid,\r\n non_chimeras_retention=opts.non_chimeras_retention,\r\n usearch61_minh=opts.usearch61_minh,\r\n usearch61_xn=opts.usearch61_xn,\r\n usearch61_dn=opts.usearch61_dn,\r\n usearch61_mindiffs=opts.usearch61_mindiffs,\r\n usearch61_mindiv=opts.usearch61_mindiv,\r\n usearch61_abundance_skew=opts.usearch61_abundance_skew,\r\n percent_id_usearch61=opts.percent_id_usearch61,\r\n minlen=opts.minlen,\r\n word_length=opts.word_length,\r\n max_accepts=opts.max_accepts,\r\n max_rejects=opts.max_rejects,\r\n verbose=opts.verbose,\r\n threads=threads)", "def main():\n # Goal is to model the OSSOS resonance detections given a file with parameters for those resonances.\n # e.g. from Crompvoets et al. (2021)\n\n # now run a survey simulation.\n params = sys.argv[1]\n H_max = float(sys.argv[2])\n outfile=f\"{os.path.splitext(params)[0]}_Model.dat\"\n print(f\"Saving results to {outfile}\")\n if not os.access(outfile, os.R_OK):\n run(outfile, params, 123456789, H_max=H_max)\n\n # confirm this looks like the OSSOS detections using rose plot.\n face_down_plot(outfile)", "def main():\n parser = argparse.ArgumentParser(description='Run dap model on signalmedia data.')\n parser.add_argument('--train_file', type=str, help='Path to training data file.',\n default=\"train_signalmedia.dap\")\n parser.add_argument('--test_file', type=str, help='Path to testing data file. If None, no prediction is run',\n default=\"test_signalmedia.dap\")\n parser.add_argument('--vocab_file', type=str, help='Path to vocabulary file.',\n default=\"signalmedia.bow.vocab\")\n parser.add_argument('--data_dir', type=str, help='directory where all data files reside.')\n parser.add_argument('--evaluate_every', type=int,\n help=\"If given a test file, number of EM iterations between evaluations of test set. Default of 0 = evaluate after each epoch.\")\n parser.add_argument('--max_training_minutes', type=float,\n help=\"If given this will stop training once the specified number of minutes have elapsed.\")\n parser.add_argument('--max_epochs', type=int)\n parser.add_argument('--process_noise', type=float, default=0.2)\n parser.add_argument('--measurement_noise', type=float, default=0.8)\n parser.add_argument('--num_topics', type=int, default=75)\n parser.add_argument('--num_personas', type=int, default=25)\n parser.add_argument('--regularization', type=float, default=0.2,\n help=\"How much to penalize similar personas. Recommend [0, 0.5].\")\n parser.add_argument('--batch_size', type=int, default=512,\n help=\"Batch size. 
Set to -1 for full gradient updates, else stochastic mini-batches used.\")\n parser.add_argument('--num_workers', type=int, default=1)\n args = parser.parse_args()\n\n path_to_current_file = os.path.abspath(os.path.dirname(__file__))\n if args.data_dir is None:\n data_dir = os.path.join(path_to_current_file, \"../../data/signalmedia/blogs_aligned_3_30/\")\n else:\n data_dir = args.data_dir\n\n np.random.seed(2018)\n\n disable_log = False\n if disable_log:\n logging.disable(logging.INFO)\n else:\n log_format = '%(asctime)s : %(levelname)s : %(message)s'\n logging.basicConfig(format=log_format, level=logging.INFO)\n\n # initialize model\n dap = DAPPER(num_topics=args.num_topics, num_personas=args.num_personas,\n process_noise=args.process_noise, measurement_noise=args.measurement_noise,\n regularization=args.regularization,\n max_epochs=args.max_epochs, max_training_minutes=args.max_training_minutes,\n batch_size=args.batch_size,\n step_size=0.7, learning_offset=10, learning_decay=0.7,\n num_workers=args.num_workers)\n\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n train = Corpus(input_file=data_dir + args.train_file, vocab_file=data_dir + args.vocab_file)\n test = Corpus(input_file=data_dir + args.test_file,\n vocab_file=data_dir + args.vocab_file,\n author2id=train.author2id)\n\n train_results, test_results = dap.fit_predict(train_corpus=train, test_corpus=test,\n evaluate_every=args.evaluate_every,\n random_beta=False,\n check_model_lhood=True)\n # train_results = dap.fit(corpus=train, random_beta=False, check_model_lhood=False)\n print(dap)\n\n # save model output\n results_dir = data_dir.replace(\"/data/\", \"/results/\")\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n model_sig = \"K{}_P{}_bs{}_pn{}_mn{}_reg{}_epochs{}_cpu{}_{}.txt\".format(\n args.num_topics, args.num_personas,\n args.batch_size,\n int(100 * args.process_noise), int(100 * args.measurement_noise),\n int(100 * args.regularization), dap.total_epochs,\n args.num_workers, time.strftime('%m_%d_%Y_%H%M'))\n dap.save_topics(filename=results_dir + \"topics_\" + model_sig, topn=10)\n dap.save_author_personas(filename=results_dir + \"personas_\" + model_sig)\n dap.save_persona_topics(filename=results_dir + \"alpha_\" + model_sig)\n dap.save_convergences(filename=results_dir + \"train_convergence_\" + model_sig, results=train_results)\n dap.save_convergences(filename=results_dir + \"test_convergence_\" + model_sig, results=test_results)", "def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,\n debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):\n\n # If we are running with opencl_gui then set opencl to True, so you only need to pass one flag\n if opencl_gui:\n opencl = True\n\n # First see if we're reading a parameters file or using command-line arguments.\n if no_parameters_file:\n print(\"Not reading a parameters file\")\n else:\n print(f\"Reading parameters file: {parameters_file}. \"\n f\"Any other model-related command-line arguments are being ignored\")\n with open(parameters_file, 'r') as f:\n parameters = load(f, Loader=SafeLoader)\n sim_params = parameters[\"microsim\"] # Parameters for the dynamic microsim (python)\n calibration_params = parameters[\"microsim_calibration\"]\n disease_params = parameters[\"disease\"] # Parameters for the disease model (r)\n # TODO Implement a more elegant way to set the parameters and pass them to the model. 
E.g.:\n # self.params, self.params_changed = Model._init_kwargs(params, kwargs)\n # [setattr(self, key, value) for key, value in self.params.items()]\n # Utility parameters\n scenario = sim_params[\"scenario\"]\n iterations = sim_params[\"iterations\"]\n data_dir = sim_params[\"data-dir\"]\n output = sim_params[\"output\"]\n output_every_iteration = sim_params[\"output-every-iteration\"]\n debug = sim_params[\"debug\"]\n repetitions = sim_params[\"repetitions\"]\n lockdown_file = sim_params[\"lockdown-file\"]\n\n # Check the parameters are sensible\n if iterations < 1:\n raise ValueError(\"Iterations must be > 1. If you want to just initialise the model and then exit, use\"\n \"the --initialise flag\")\n if repetitions < 1:\n raise ValueError(\"Repetitions must be greater than 0\")\n if (not output) and output_every_iteration:\n raise ValueError(\"Can't choose to not output any data (output=False) but also write the data at every \"\n \"iteration (output_every_iteration=True)\")\n\n print(f\"Running model with the following parameters:\\n\"\n f\"\\tParameters file: {parameters_file}\\n\"\n f\"\\tScenario directory: {scenario}\\n\"\n f\"\\tInitialise (and then exit?): {initialise}\\n\"\n f\"\\tNumber of iterations: {iterations}\\n\"\n f\"\\tData dir: {data_dir}\\n\"\n f\"\\tOutputting results?: {output}\\n\"\n f\"\\tOutputting results at every iteration?: {output_every_iteration}\\n\"\n f\"\\tDebug mode?: {debug}\\n\"\n f\"\\tNumber of repetitions: {repetitions}\\n\"\n f\"\\tLockdown file: {lockdown_file}\\n\",\n f\"\\tUse cache?: {use_cache}\\n\",\n f\"\\tUse OpenCL version?: {opencl}\\n\",\n f\"\\tUse OpenCL GUI?: {opencl_gui}\\n\",\n f\"\\tUse OpenCL GPU for processing?: {opencl_gpu}\\n\",\n f\"\\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\\n\",\n f\"\\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\\n\")\n\n # To fix file path issues, use absolute/full path at all times\n # Pick either: get working directory (if user starts this script in place, or set working directory\n # Option A: copy current working directory:\n base_dir = os.getcwd() # get current directory\n data_dir = os.path.join(base_dir, data_dir)\n r_script_dir = os.path.join(base_dir, \"R\", \"py_int\")\n\n ### section for fetching data\n if not os.path.isdir(data_dir):\n\n print(f\"No data directory detected.\")\n\n if os.path.isfile(data_dir + \".tar.gz\"):\n print(f\"An archive file matching the name of the data directory has been detected!\")\n print(f\"Unpacking this archive file now.\")\n unpack_data(data_dir + \".tar.gz\")\n \n else:\n print(f\"{data_dir} does not exist. Downloading devon_data.\")\n data_setup()\n\n # Temporarily only want to use Devon MSOAs\n # devon_msoas = pd.read_csv(os.path.join(data_dir, \"devon_msoas.csv\"), header=None,\n # names=[\"x\", \"y\", \"Num\", \"Code\", \"Desc\"])\n\n # Prepare the QUANT api (for estimating school and retail destinations)\n # we only need 1 QuantRampAPI object even if we do multiple iterations\n # the quant_object object will be called by each microsim object\n quant_path = os.path.join(data_dir, \"QUANT_RAMP\")\n if not os.path.isdir(quant_path):\n raise Exception(\"QUANT directory does not exist, please check input\")\n quant_object = QuantRampAPI(quant_path)\n\n # args for population initialisation\n population_args = {\"data_dir\": data_dir, \"debug\": debug,\n \"quant_object\": quant_object}\n\n # args for Python/R Microsim. 
Use same arguments whether running 1 repetition or many\n msim_args = {\"data_dir\": data_dir, \"r_script_dir\": r_script_dir, \"scen_dir\": scenario, \"output\": output,\n \"output_every_iteration\": output_every_iteration}\n\n if not no_parameters_file: # When using a parameters file, include the calibration parameters\n msim_args.update(**calibration_params) # python calibration parameters are unpacked now\n # Also read the R calibration parameters (this is a separate section in the .yml file)\n if disease_params is not None:\n # (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -\n # it will be turned into an empty dictionary by the Microsim constructor)\n msim_args[\"disease_params\"] = disease_params # R parameters kept as a dictionary and unpacked later\n\n # Temporarily use dummy data for testing\n # data_dir = os.path.join(base_dir, \"dummy_data\")\n # m = Microsim(data_dir=data_dir, testing=True, output=output)\n\n # cache to hold previously calculate population data\n cache = InitialisationCache(cache_dir=os.path.join(data_dir, \"caches\"))\n\n # generate new population dataframes if we aren't using the cache, or if the cache is empty\n if not use_cache or cache.is_empty():\n print(f'Reading population data because {\"caching is disabled\" if not use_cache else \"the cache is empty\"}')\n population = PopulationInitialisation(**population_args)\n individuals = population.individuals\n activity_locations = population.activity_locations\n\n # store in cache so we can load later\n cache.store_in_cache(individuals, activity_locations)\n else: # load from cache\n print(\"Loading data from previous cache\")\n individuals, activity_locations = cache.read_from_cache()\n\n # Calculate the time-activity multiplier (this is for implementing lockdown)\n time_activity_multiplier = None\n if lockdown_file != \"\":\n print(f\"Implementing a lockdown with time activities from {lockdown_file}\")\n time_activity_multiplier: pd.DataFrame = \\\n PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))\n\n # Select which model implementation to run\n if opencl:\n run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,\n opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)\n else:\n # If -init flag set the don't run the model. Note for the opencl model this check needs to happen\n # after the snapshots have been created in run_opencl_model\n if initialise:\n print(\"Have finished initialising model. -init flag is set so not running it. 
Exitting\")\n return\n run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,\n repetitions, parameters_file)", "def main():\n import argparse\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--model_path',\n '-m',\n required=True,\n type=str,\n help='filepath to model')\n\n parser.add_argument('--input_data_path',\n '-i',\n required=True,\n type=str,\n help='filepath to input data, should be jsonl')\n\n parser.add_argument('--output_data_path',\n '-o',\n required=True,\n type=str,\n help='filepath to output data, should be jsonl')\n\n parser.add_argument('--text_column_name',\n '-t',\n required=True,\n type=str,\n help='name of column of with raw text in data')\n\n parser.add_argument('--metadata_columns',\n '-md',\n action='append',\n type=str,\n help='columns to include with predictions')\n\n parser.add_argument('--prediction_column_name',\n '-p',\n default=constants.PREDICTION_COLUMN_NAME,\n type=str,\n help='name of column with predictions')\n\n args = parser.parse_args()\n\n tag_notes(model_path=args.model_path,\n input_data_path=args.input_data_path,\n output_data_path=args.output_data_path,\n text_column_name=args.text_column_name,\n metadata_columns=args.metadata_columns,\n prediction_column_name=args.prediction_column_name)", "def __init__(\n self,\n hparams: argparse.Namespace,\n num_labels=None,\n mode=\"base\",\n config=None,\n tokenizer=None,\n model=None,\n **config_kwargs\n ):\n super().__init__()\n\n self.step_count = 0\n self.hparams = hparams\n self.output_dir = Path(self.hparams.output_dir)\n cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None\n if config is None:\n self.config = AutoConfig.from_pretrained(\n self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,\n **({\"num_labels\": num_labels} if num_labels is not None else {}),\n cache_dir=cache_dir,\n **config_kwargs,\n )\n else:\n self.config: BartConfig = config\n\n extra_model_params = (\"encoder_layerdrop\", \"decoder_layerdrop\", \"dropout\", \"attention_dropout\")\n for p in extra_model_params:\n if getattr(self.hparams, p, None):\n assert hasattr(self.config, p), f\"model config doesn't have a `{p}` attribute\"\n setattr(self.config, p, getattr(self.hparams, p))\n\n if tokenizer is None:\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,\n cache_dir=cache_dir,\n )\n else:\n self.tokenizer: BartTokenizer = tokenizer\n # self.model_type = MODEL_MODES[mode]\n if model is None:\n self.model = self.model_type.from_pretrained(\n self.hparams.model_name_or_path,\n from_tf=bool(\".ckpt\" in self.hparams.model_name_or_path),\n config=self.config,\n cache_dir=cache_dir,\n )\n else:\n self.model = model", "def run_train(**kwargs):\n cmd = 'python yolov3/train.py'\n pms_list = [\n 'epochs', 'batch_size',\n 'gradient_accumulations', 'model_def',\n 'data_config', 'pretrained_weights',\n 'n_cpu', 'img_size', 'checkpoint_interval',\n 'evaluation_interval', 'compute_map',\n 'multiscale_training', 'verbose',\n 'logdir'\n ]\n call_command(pms_list, cmd, kwargs)", "def run(args):\n # CONFIG\n run_name = get_run_name(args)\n logger.info(f'*** Starting run {run_name} ***')\n data_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/finetune_data/{args.finetune_data}'\n output_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/runs/{run_name}'\n\n # Get configs\n pretrained_model_config_path = 
get_model_config_path(args)\n model_config = get_model_config(pretrained_model_config_path)\n\n # Meta data/label mapping\n input_meta_data = get_input_meta_data(data_dir)\n label_mapping = get_label_mapping(data_dir)\n logger.info(f'Loaded training data meta.json file: {input_meta_data}')\n\n # Calculate steps, warmup steps and eval steps\n train_data_size = input_meta_data['train_data_size']\n num_labels = input_meta_data['num_labels']\n max_seq_length = input_meta_data['max_seq_length']\n if args.limit_train_steps is None:\n steps_per_epoch = int(train_data_size / args.train_batch_size)\n else:\n steps_per_epoch = args.limit_train_steps\n warmup_steps = int(args.num_epochs * train_data_size * args.warmup_proportion/ args.train_batch_size)\n if args.limit_eval_steps is None:\n eval_steps = int(math.ceil(input_meta_data['eval_data_size'] / args.eval_batch_size))\n else:\n eval_steps = args.limit_eval_steps\n\n # some logging\n if args.init_checkpoint is None:\n logger.info(f'Finetuning on datset {args.finetune_data} using default pretrained model {args.model_class}')\n else:\n logger.info(f'Finetuning on datset {args.finetune_data} using pretrained model in {args.init_checkpoint} of type {args.model_class}')\n logger.info(f'Running {args.num_epochs} epochs with {steps_per_epoch:,} steps per epoch')\n logger.info(f'Using warmup proportion of {args.warmup_proportion}, resulting in {warmup_steps:,} warmup steps')\n logger.info(f'Using learning rate: {args.learning_rate}, training batch size: {args.train_batch_size}, num_epochs: {args.num_epochs}')\n\n # Get model\n classifier_model, core_model = get_model(args, model_config, steps_per_epoch, warmup_steps, num_labels, max_seq_length)\n optimizer = classifier_model.optimizer\n loss_fn = get_loss_fn(num_labels)\n try:\n if ',' in args.validation_freq:\n validation_freq = args.validation_freq.split(',')\n validation_freq = [int(v) for v in validation_freq]\n else:\n validation_freq = int(args.validation_freq)\n except:\n raise ValueError(f'Invalid argument for validation_freq!')\n logger.info(f'Using a validation frequency of {validation_freq}')\n\n # Restore checkpoint\n if args.init_checkpoint:\n checkpoint_path = f'gs://{args.bucket_name}/{args.project_name}/pretrain/runs/{args.init_checkpoint}'\n checkpoint = tf.train.Checkpoint(model=core_model)\n checkpoint.restore(checkpoint_path).assert_existing_objects_matched()\n logger.info(f'Successfully restored checkpoint from {checkpoint_path}')\n\n # Run keras compile\n logger.info(f'Compiling keras model...')\n classifier_model.compile(\n optimizer=optimizer,\n loss=loss_fn,\n metrics=get_metrics())\n logger.info(f'... 
done')\n\n # Create all custom callbacks\n summary_dir = os.path.join(output_dir, 'summaries')\n summary_callback = tf.keras.callbacks.TensorBoard(summary_dir, profile_batch=0)\n time_history_callback = keras_utils.TimeHistory(\n batch_size=args.train_batch_size,\n log_steps=args.time_history_log_steps,\n logdir=summary_dir)\n custom_callbacks = [summary_callback, time_history_callback]\n if args.save_model:\n logger.info('Using save_model option...')\n checkpoint_path = os.path.join(output_dir, 'checkpoint')\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)\n custom_callbacks.append(checkpoint_callback)\n if args.early_stopping_epochs > 0:\n logger.info(f'Using early stopping of after {args.early_stopping_epochs} epochs of val_loss not decreasing')\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(patience=args.early_stopping_epochs, monitor='val_loss')\n custom_callbacks.append(early_stopping_callback)\n\n # Generate dataset_fn\n train_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'train.tfrecords'),\n max_seq_length,\n args.train_batch_size,\n is_training=True)\n eval_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'dev.tfrecords'),\n max_seq_length,\n args.eval_batch_size,\n is_training=False)\n\n # Add mertrics callback to calculate performance metrics at the end of epoch\n performance_metrics_callback = Metrics(\n eval_input_fn,\n label_mapping,\n os.path.join(summary_dir, 'metrics'),\n eval_steps,\n args.eval_batch_size,\n validation_freq)\n custom_callbacks.append(performance_metrics_callback)\n\n # Run keras fit\n time_start = time.time()\n logger.info('Run training...')\n history = classifier_model.fit(\n x=train_input_fn(),\n validation_data=eval_input_fn(),\n steps_per_epoch=steps_per_epoch,\n epochs=args.num_epochs,\n validation_steps=eval_steps,\n validation_freq=validation_freq,\n callbacks=custom_callbacks,\n verbose=1)\n time_end = time.time()\n training_time_min = (time_end-time_start)/60\n logger.info(f'Finished training after {training_time_min:.1f} min')\n\n # Write training log\n all_scores = performance_metrics_callback.scores\n all_predictions = performance_metrics_callback.predictions\n if len(all_scores) > 0:\n final_scores = all_scores[-1]\n logger.info(f'Final eval scores: {final_scores}')\n else:\n final_scores = {}\n full_history = history.history\n if len(full_history) > 0:\n final_val_loss = full_history['val_loss'][-1]\n final_loss = full_history['loss'][-1]\n logger.info(f'Final training loss: {final_loss:.2f}, Final validation loss: {final_val_loss:.2f}')\n else:\n final_val_loss = None\n final_loss = None\n data = {\n 'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'run_name': run_name,\n 'final_loss': final_loss,\n 'final_val_loss': final_val_loss,\n 'max_seq_length': max_seq_length,\n 'num_train_steps': steps_per_epoch * args.num_epochs,\n 'eval_steps': eval_steps,\n 'steps_per_epoch': steps_per_epoch,\n 'training_time_min': training_time_min,\n 'data_dir': data_dir,\n 'output_dir': output_dir,\n 'all_scores': all_scores,\n 'all_predictions': all_predictions,\n 'num_labels': num_labels,\n 'label_mapping': label_mapping,\n **full_history,\n **final_scores,\n **vars(args),\n }\n # Write run_log\n f_path_training_log = os.path.join(output_dir, 'run_logs.json')\n logger.info(f'Writing training log to {f_path_training_log}...')\n save_to_json(data, f_path_training_log)\n # Write bert config\n model_config.id2label = 
label_mapping\n model_config.label2id = {v:k for k, v in label_mapping.items()}\n model_config.max_seq_length = max_seq_length\n model_config.num_labels = num_labels\n f_path_bert_config = os.path.join(output_dir, 'bert_config.json')\n logger.info(f'Writing BERT config to {f_path_bert_config}...')\n save_to_json(model_config.to_dict(), f_path_bert_config)", "def main():\n my_emr = EmrProcessing()\n\n if \"-s\" in sys.argv:\n my_emr.verbose_mode = False\n else:\n my_emr.verbose_mode = True\n print \"\\nStarting Titanic Data Analysis\"\n my_emr.parse_user_selections()\n\n # Setup\n my_emr.clear_local_output_directory()\n my_emr.update_mapper_file(\"model2\")\n\n # S3 activities\n my_emr.empty_bucket()\n my_emr.create_and_fill_bucket()\n\n # EMR activities\n my_emr.setup_and_run_job()\n my_emr.wait_until_job_completes()\n\n # Cleanup\n my_emr.download_output_files()\n my_emr.post_process_output_file()\n if my_emr.verbose_mode:\n my_emr.print_local_output_files_stats()", "def run(cfg): # pylint: disable=too-many-locals,too-many-statements\n # load_text\n voca, gazet, data_, pos_model, word_model = load_text(cfg)\n\n char_voca = voca['in']\n\n # Build Ner model\n model = build_model(cfg, char_voca=char_voca, word_voca=None,\n gazet=gazet, pos_voca=pos_model.cfg.voca['out'])\n\n epoch_syl_cnt = data_['train'].get_syllable_count()\n iter_per_epoch = epoch_syl_cnt // cfg.batch_size\n iter_to_rvt = iter_per_epoch * cfg.rvt_epoch\n\n # Load GPU\n if torch.cuda.is_available():\n model.cuda()\n\n # Loss / Optimizer\n criterion = nn.CrossEntropyLoss()\n optimizer = cfg.optimizer(model.parameters())\n\n losses = []\n accuracies = []\n f_scores = []\n\n iter_ = 1\n best_iter = 0\n\n # Remove existing log directory\n if cfg.clean:\n logging.info('==== removing log: %s ====', cfg.model_dir)\n shutil.rmtree(cfg.model_dir)\n time.sleep(3)\n\n else:\n if cfg.ckpt_path.exists():\n logging.info('==== reverting from check point ====')\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n best_iter = model_dump['iter']\n iter_ = best_iter + 1\n losses.append(model_dump['loss'])\n accuracies.append(model_dump['accuracy'])\n f_scores.append(model_dump['f-score'])\n logging.info('---- iter: %dk, loss: %f, accuracy: %f, f-score: %f ----',\n iter_ // 1000, losses[-1], accuracies[-1], f_scores[-1])\n lrs = [param_group['lr'] for param_group in optimizer.param_groups]\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))\n\n # Tensorboard Summary Writer\n sum_wrt = SummaryWriter(cfg.model_dir)\n\n # loss / accuracy / f-score logging (.tsv)\n log_path = cfg.model_dir.joinpath('log.tsv')\n logf = open(log_path, 'at' if cfg.ckpt_path.exists() else 'wt')\n if os.path.getsize(log_path) == 0:\n print('iter\\tloss\\taccuracy\\tf-score', file=logf)\n\n # Main Training Loop\n revert = 0\n one_more_thing = True # one more change to increase learning rate into 10 times\n batches = []\n while revert <= cfg.rvt_term or one_more_thing:\n for train_sent in data_['train']:\n # Convert to Tensor\n # labels [sentence_len]\n # contexts [sentence_len, 21]\n # gazet [sentence_len, 21, 15]\n train_sent.set_word_feature(pos_model, word_model, cfg.window)\n train_sent.set_pos_feature(pos_model, cfg.window)\n train_labels, train_contexts, train_gazet, train_pos, train_words = \\\n train_sent.to_tensor(voca, gazet, cfg.window, cfg.phoneme, cfg.gazet_embed)\n\n # Convert to Variable\n train_labels = Variable(train_labels)\n train_contexts = 
Variable(train_contexts)\n train_gazet = Variable(train_gazet)\n train_pos = Variable(train_pos, requires_grad=False)\n train_words = Variable(train_words, requires_grad=False)\n\n # Load on GPU\n if torch.cuda.is_available():\n train_labels = train_labels.cuda()\n train_contexts = train_contexts.cuda()\n train_gazet = train_gazet.cuda()\n train_pos = train_pos.cuda()\n train_words = train_words.cuda()\n\n # Reset Gradient\n optimizer.zero_grad()\n\n # Training mode (updates/dropout/batchnorm)\n model.train()\n\n # import ipdb; ipdb.set_trace()\n\n # Forward Prop\n outputs = model(train_contexts, train_gazet, train_pos, train_words)\n\n batches.append((train_labels, outputs))\n if sum([batch[0].size(0) for batch in batches]) < cfg.batch_size:\n continue\n batch_label = torch.cat([x[0] for x in batches], 0)\n batch_output = torch.cat([x[1] for x in batches], 0)\n batches = []\n\n # Backprop\n loss = criterion(batch_output, batch_label)\n loss.backward()\n optimizer.step()\n\n # Validation\n if iter_ % 1000 == 0:\n measure = tagger.PerformanceMeasure()\n # Freeze parameters\n model.eval()\n\n # Calculate loss\n losses.append(loss.data[0])\n for dev_sent in data_['dev']:\n # Convert to CUDA Variable\n dev_sent.set_word_feature(pos_model, word_model, cfg.window)\n dev_sent.set_pos_feature(pos_model, cfg.window)\n _, dev_contexts, dev_gazet, dev_pos, dev_words = \\\n dev_sent.to_tensor(voca, gazet, cfg.window, cfg.phoneme, cfg.gazet_embed)\n dev_contexts = Variable(dev_contexts, volatile=True)\n dev_gazet = Variable(dev_gazet, volatile=True)\n dev_pos = Variable(dev_pos, volatile=True)\n dev_words = Variable(dev_words, volatile=True)\n if torch.cuda.is_available():\n dev_contexts = dev_contexts.cuda()\n dev_gazet = dev_gazet.cuda()\n dev_pos = dev_pos.cuda()\n dev_words = dev_words.cuda()\n\n outputs = model(dev_contexts, dev_gazet, dev_pos, dev_words)\n\n _, predicts = outputs.max(1)\n dev_sent.compare_label(predicts, voca, measure)\n\n accuracy, f_score = measure.get_score()\n print(file=sys.stderr)\n sys.stderr.flush()\n if not f_scores or f_score > max(f_scores):\n logging.info('==== writing best model: %f ====', f_score)\n model.save(cfg.ckpt_path)\n check_point = CheckPoint(optimizer, model,\n {'iter': iter_, 'loss': loss.data[0],\n 'accuracy': accuracy, 'f-score': f_score})\n check_point.save(cfg.ckpt_path)\n logging.info('check point: %s', check_point)\n best_iter = iter_\n revert = 0\n one_more_thing = True\n accuracies.append(accuracy)\n f_scores.append(f_score)\n logging.info('---- iter: %dk, loss: %f, accuracy: %f, f-score: %f (max: %r) ----',\n iter_ // 1000, losses[-1], accuracy, f_score, max(f_scores))\n\n if cfg.model_dir.exists():\n sum_wrt.add_scalar('loss', losses[-1], iter_ // 1000)\n sum_wrt.add_scalar('accuracy', accuracy, iter_ // 1000)\n sum_wrt.add_scalar('f-score', f_score, iter_ // 1000)\n print('{}\\t{}\\t{}\\t{}'.format(iter_ // 1000, losses[-1], accuracy,\n f_score), file=logf)\n logf.flush()\n\n # revert policy\n if (iter_ - best_iter) > iter_to_rvt:\n revert += 1\n logging.info('==== revert to iter: %dk, revert count: %d ====',\n best_iter // 1000, revert)\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n lrs = []\n for param_group in optimizer.param_groups:\n param_group['lr'] *= (0.9 if one_more_thing else 0.8) ** revert\n lrs.append(param_group['lr'])\n best_iter = iter_\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))\n elif iter_ % 100 == 0:\n 
print('.', end='', file=sys.stderr)\n sys.stderr.flush()\n\n iter_ += 1\n if revert > cfg.rvt_term and one_more_thing:\n logging.info('==== one more thing, revert to iter: %dk ====', best_iter // 1000)\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n lrs = []\n for param_group in optimizer.param_groups:\n param_group['lr'] *= 10.0\n lrs.append(param_group['lr'])\n best_iter = iter_\n revert = 0\n one_more_thing = False\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))", "def main(args):\r\n\r\n # Logging info\r\n formatter = logging.Formatter('%(asctime)s %(levelname)s - '\r\n '%(funcName)s: %(message)s',\r\n '%H:%M:%S')\r\n logger = logging.getLogger(__name__)\r\n logger.setLevel('INFO')\r\n stream = logging.StreamHandler()\r\n stream.setLevel('INFO')\r\n stream.setFormatter(formatter)\r\n logger.addHandler(stream)\r\n\r\n set_seed(args.seed)\r\n device = torch.device(\r\n 'cuda' if torch.cuda.is_available() and args.cuda else 'cpu')\r\n model_name = f'{args.name}_lr{args.lr}_z{args.latent_dim}' \\\r\n + f'_h{args.hidden_dim}_p{args.p_dropout}'\r\n model_dir = os.path.join(args.results, model_name)\r\n logger.info(f'Directory for saving and loading models: {model_dir}')\r\n\r\n if not args.eval:\r\n # Model directory\r\n new_model_dir(model_dir, logger=logger)\r\n\r\n # Dataloaders\r\n train_loader, valid_loader = get_dataloaders(\r\n args.data, args.t_hours, args.n_bins,\r\n validation=True, dynamic=args.dynamic,\r\n batch_size=args.bs, logger=logger)\r\n logger.info(\r\n f'Train {args.model_type}-{args.t_hours} ' +\r\n f'with {len(train_loader.dataset)} samples')\r\n\r\n # Load model\r\n n_tokens = len(np.load(\r\n os.path.join(\r\n args.data, '_dicts', f'{args.t_hours}_{args.n_bins}.npy'),\r\n allow_pickle=True).item())\r\n model = init_model(\r\n args.model_type, n_tokens, args.latent_dim, args.hidden_dim,\r\n p_dropout=args.p_dropout, dt=args.dt,\r\n weighted=args.weighted, dynamic=args.dynamic)\r\n logger.info(f'#params in model: {get_n_param(model)}')\r\n\r\n # Optimizer\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\r\n loss_f = BCE()\r\n model = model.to(device)\r\n\r\n # Training\r\n trainer = Trainer(\r\n model, loss_f, optimizer,\r\n device=device, logger=logger, save_dir=model_dir, p_bar=args.p_bar)\r\n trainer.train(\r\n train_loader, valid_loader,\r\n epochs=args.epochs, early_stopping=args.early_stopping)\r\n\r\n # Save model\r\n metadata = vars(args)\r\n metadata['n_tokens'] = n_tokens\r\n save_model(trainer.model, model_dir, metadata=metadata)\r\n\r\n if args.test:\r\n # Load model\r\n model = load_model(model_dir, is_gpu=args.cuda)\r\n metadata = load_metadata(model_dir)\r\n\r\n # Dataloader\r\n test_loader, _ = get_dataloaders(\r\n metadata['data'], metadata['t_hours'], metadata['n_bins'],\r\n validation=False, dynamic=metadata['dynamic'], batch_size=128,\r\n shuffle=False, logger=logger)\r\n\r\n # Evaluate\r\n loss_f = BCE()\r\n evaluator = Trainer(\r\n model, loss_f,\r\n device=device, logger=logger, save_dir=model_dir, p_bar=args.p_bar)\r\n evaluator._valid_epoch(test_loader)", "def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"xTrain\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"yTrain\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"xTest\",\n help=\"filename for features 
of the test data\")\n parser.add_argument(\"yTest\",\n help=\"filename for labels associated with the test data\")\n parser.add_argument(\"lr\", type=float, help=\"learning rate\")\n parser.add_argument(\"bs\", type=int, help=\"batch size\")\n parser.add_argument(\"epoch\", type=int, help=\"max number of epochs\")\n parser.add_argument(\"--seed\", default=334, \n type=int, help=\"default seed number\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = file_to_numpy(args.xTrain)\n yTrain = file_to_numpy(args.yTrain)\n xTest = file_to_numpy(args.xTest)\n yTest = file_to_numpy(args.yTest)\n\n # setting the seed for deterministic behavior\n np.random.seed(args.seed) \n model = SgdLR(args.lr, args.bs, args.epoch)\n trainStats = model.train_predict(xTrain, yTrain, xTest, yTest)\n print(trainStats)", "def main():\n parser = make_argument_parser()\n args = parser.parse_args()\n\n input_dirs = args.inputdirs\n tf = args.factor\n valid_chroms = args.validchroms\n valid_input_dirs = args.validinputdirs\n test_chroms = args.testchroms\n epochs = args.epochs\n patience = args.patience\n learningrate = args.learningrate\n seed = args.seed\n utils.set_seed(seed)\n dropout_rate = args.dropout\n L = args.seqlen\n w = args.motifwidth\n utils.L = L\n utils.w = w\n utils.w2 = w/2\n negatives = args.negatives\n assert negatives > 0\n meta = args.meta\n gencode = args.gencode\n motif = args.motif\n\n num_motifs = args.kernels\n num_recurrent = args.recurrent\n num_dense = args.dense\n \n features = ['bigwig'] \n\n if tf:\n print 'Single-task training:', tf\n singleTask = True\n if meta:\n print 'Including metadata features'\n features.append('meta')\n if gencode:\n print 'Including genome annotations'\n features.append('gencode')\n else:\n print 'Multi-task training'\n singleTask = False\n #Cannot use any metadata features\n assert not meta\n assert not gencode\n\n if args.outputdir is None:\n clobber = True\n output_dir = args.outputdirc\n else:\n clobber = False\n output_dir = args.outputdir\n\n try: # adapted from dreme.py by T. 
Bailey\n os.makedirs(output_dir)\n except OSError as exc:\n if exc.errno == errno.EEXIST:\n if not clobber:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'but you specified not to clobber it') % output_dir\n sys.exit(1)\n else:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'so it will be clobbered') % output_dir\n\n print 'Loading genome'\n genome = utils.load_genome()\n if valid_input_dirs:\n print 'You specified at least one validation input directory'\n assert singleTask # This option only works for single-task training\n print 'Loading ChIP labels'\n if singleTask:\n chip_bed_list, nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(input_dirs, tf)\n if valid_input_dirs:\n valid_chip_bed_list, valid_nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(valid_input_dirs, tf)\n num_tfs = 1\n else:\n assert len(input_dirs) == 1 # multi-task training only supports one cell line\n input_dir = input_dirs[0]\n tfs, positive_windows, y_positive, nonnegative_regions_bed = \\\n utils.load_chip_multiTask(input_dir)\n num_tfs = len(tfs)\n print 'Loading bigWig data'\n bigwig_names, bigwig_files_list = utils.load_bigwigs(input_dirs)\n num_bigwigs = len(bigwig_names)\n if valid_input_dirs:\n valid_bigwig_names, valid_bigwig_files_list = utils.load_bigwigs(valid_input_dirs)\n assert valid_bigwig_names == bigwig_names\n if not singleTask:\n bigwig_files = bigwig_files_list[0]\n if meta:\n print 'Loading metadata features'\n meta_names, meta_list = utils.load_meta(input_dirs)\n if valid_input_dirs:\n valid_meta_names, valid_meta_list = utils.load_load(valid_input_dirs)\n assert valid_meta_names == meta_names\n else:# meta option was not selected, pass empty metadata features to the functions\n meta_list = [[] for bigwig_files in bigwig_files_list]\n if valid_input_dirs:\n valid_meta_list = [[] for bigwig_files in valid_bigwig_files_list]\n \n print 'Making features'\n if singleTask:\n if not valid_input_dirs: #validation directories not used, must pass placeholder values\n valid_chip_bed_list = None\n valid_nonnegative_regions_bed_list = None\n valid_bigwig_files_list = None\n valid_meta_list = None \n datagen_train, datagen_valid = \\\n utils.make_features_singleTask(chip_bed_list,\n nonnegative_regions_bed_list, bigwig_files_list, bigwig_names,\n meta_list, gencode, genome, epochs, negatives, valid_chroms, test_chroms, \n valid_chip_bed_list, valid_nonnegative_regions_bed_list, \n valid_bigwig_files_list, valid_meta_list)\n else:\n datagen_train, datagen_valid = \\\n utils.make_features_multiTask(positive_windows, y_positive,\n nonnegative_regions_bed, bigwig_files, bigwig_names,\n genome, epochs, valid_chroms, test_chroms)\n print 'Building model'\n if num_recurrent == 0:\n print 'You specified 0 LSTM units. Omitting BLSTM layer'\n if num_recurrent < 0:\n print 'You specified less than 0 LSTM units. 
Replacing BLSTM layer with global max-pooling layer'\n if meta or gencode:\n num_meta = 0\n if meta:\n num_meta = len(meta_names)\n if gencode:\n num_meta += 6\n model = utils.make_meta_model(num_tfs, num_bigwigs, num_meta, num_motifs, num_recurrent, num_dense, dropout_rate)\n else:\n model = utils.make_model(num_tfs, num_bigwigs, num_motifs, num_recurrent, num_dense, dropout_rate)\n\n if motif:\n assert singleTask # This option only works with single-task training\n motifs_db = utils.load_motif_db('resources/HOCOMOCOv9.meme')\n if tf in motifs_db:\n print 'Injecting canonical motif'\n pwm = motifs_db[tf]\n pwm += 0.001\n pwm = pwm / pwm.sum(axis=1)[:, np.newaxis]\n pwm = np.log2(pwm/0.25)\n utils.inject_pwm(model, pwm)\n output_tf_file = open(output_dir + '/chip.txt', 'w')\n if singleTask:\n output_tf_file.write(\"%s\\n\" % tf)\n else:\n for tf in tfs:\n output_tf_file.write(\"%s\\n\" % tf)\n output_tf_file.close()\n output_feature_file = open(output_dir + '/feature.txt', 'w')\n for feature in features:\n output_feature_file.write(\"%s\\n\" % feature)\n output_feature_file.close()\n output_bw_file = open(output_dir + '/bigwig.txt', 'w')\n for bw in bigwig_names:\n output_bw_file.write(\"%s\\n\" % bw)\n output_bw_file.close()\n if meta:\n output_meta_file = open(output_dir + '/meta.txt', 'w')\n for meta_name in meta_names:\n output_meta_file.write(\"%s\\n\" % meta_name)\n output_meta_file.close()\n model_json = model.to_json()\n output_json_file = open(output_dir + '/model.json', 'w')\n output_json_file.write(model_json)\n output_json_file.close()\n train(datagen_train, datagen_valid, model, epochs, patience, learningrate, output_dir)", "def main(model_folder, override=False):\n model_description_file = os.path.join(model_folder, \"info.yml\")\n # Read the model description file\n with open(model_description_file) as ymlfile:\n model_description = yaml.safe_load(ymlfile)\n\n project_root = utils.get_project_root()\n # Read the feature description file\n feature_folder = os.path.join(project_root, model_description[\"data-source\"])\n with open(os.path.join(feature_folder, \"info.yml\")) as ymlfile:\n feature_description = yaml.safe_load(ymlfile)\n # Get a list of all used features\n feature_list = features.get_features(feature_description[\"features\"])\n # Get the dimension of the feature vector\n input_features = sum(n.get_dimension() for n in feature_list)\n logger.info(\"Number of features: %i\", input_features)\n\n # Analyze model\n logger.info(model_description[\"model\"])\n if model_description[\"model\"][\"type\"] != \"mlp\":\n return\n create_model(\n model_folder,\n model_description[\"model\"][\"type\"],\n model_description[\"model\"][\"topology\"],\n override,\n )\n utils.create_run_logfile(model_folder)", "def main():\n if len(sys.argv) == 2 and sys.argv[1] == 'train':\n trainer = FlightModelTrainer()\n trainer.train()\n return 0\n\n if len(sys.argv) == 2 and sys.argv[1] == 'graphics':\n trainer = FlightModelTrainer()\n trainer.visualize()\n return 0\n\n predictor = FlightPredictor(path_to_weather=WEATHER_TRAIN_DATA_PATH)\n result = predictor.predict(pd.read_csv(FLIGHTS_TEST_DATA_PATH))\n print('result')\n print(result)\n # result.to_csv(\"out.csv\")\n return 0", "def main(**args):\n env = gym.make(args.pop('env'))\n\n ac_args = {'hidden_size': [64, 64], 'size': 2}\n\n # Discriminator approximators\n disc_args = {\n 'g_args': {\n 'hidden_layers': [32, 1],\n 'size': 1,\n 'activation': nn.Identity\n },\n 'h_args': {\n 'hidden_layers': [32, 32, 1],\n 'size': 2,\n 'activation': 
nn.LeakyReLU\n }\n }\n\n ac_args.update(**disc_args)\n\n train_args = {\n 'pi_train_n_iters': 80,\n 'disc_train_n_iters': 40,\n 'max_kl': args.pop('target_kl') or 1.,\n 'kl_start': 20,\n 'entropy_reg': .1,\n 'clip_ratio': .2,\n 'max_eps_len': 150,\n 'real_label': 1,\n 'pi_label': 0\n }\n agent_args = {\n 'n_epochs': args.pop('epochs') or 250,\n 'env_name': '', # 'b_10000_plr_.1e-4',\n 'steps_per_epoch': 10000\n }\n\n all_args = {\n 'ac_args': ac_args,\n 'pi_lr': 2e-4,\n 'disc_lr': 1e-4,\n 'gamma': .99,\n 'buffer_size': int(1e6),\n **agent_args,\n **train_args,\n **{k: v\n for k, v in args.items() if v}\n }\n\n airl(env, **all_args)", "def main():\n parser = argparse.ArgumentParser(description='Behavioral Cloning Training Program')\n parser.add_argument('-d', help='data directory', dest='data_dir', type=str, default='data')\n parser.add_argument('-t', help='test size fraction', dest='test_size', type=float, default=0.2)\n parser.add_argument('-k', help='drop out probability', dest='keep_prob', type=float, default=0.5)\n parser.add_argument('-n', help='number of epochs', dest='nb_epoch', type=int, default=5)\n parser.add_argument('-c', help='steering correction', dest='correction', type=float, default=0.2)\n parser.add_argument('-b', help='batch size', dest='batch_size', type=int, default=32)\n parser.add_argument('-o', help='save best models only', dest='save_best_only', type=s2b, default='true')\n parser.add_argument('-l', help='learning rate', dest='learning_rate', type=float, default=1.0e-3)\n args = parser.parse_args()\n\n print('-' * 30)\n print('Parameters')\n print('-' * 30)\n for key, value in vars(args).items():\n print('{:<20} := {}'.format(key, value))\n print('-' * 30)\n\n data = load_data(args)\n model = build_model(args)\n train_model(model, args, *data)", "def main(args):\n # Generate detectron2 config from command line arguments.\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n\n # The configuration file should not contain any datasets. 
They are configured\n # from command line arguments instead.\n if len(cfg.DATASETS.TRAIN) > 0 or len(cfg.DATASETS.TEST) > 0:\n logging.error(\"Please set DATASETS.TRAIN = () and DATASETS.TEST = ().\")\n sys.exit(1)\n cfg.DATASETS.TRAIN = (TRAIN_SPLIT_NAME, )\n cfg.DATASETS.TEST = (VALID_SPLIT_NAME, )\n\n cfg.freeze()\n default_setup(cfg, args)\n\n # Register synthetic sign datasets.\n if args.image_width is not None or args.image_height is not None:\n if args.image_width is None or args.image_height is None:\n logging.error(\n \"Please specify both, image-width and image-height (or none).\")\n sys.exit(1)\n image_shape = args.image_height, args.image_width\n else:\n image_shape = None\n\n register_synthetic_signs(args.train_csv,\n args.label_map,\n cfg,\n name=TRAIN_SPLIT_NAME,\n image_shape=image_shape)\n if args.valid_csv is not None:\n register_synthetic_signs(args.valid_csv,\n args.label_map,\n cfg,\n name=VALID_SPLIT_NAME,\n image_shape=image_shape)\n\n # Run training or evaluation.\n if args.eval_only:\n model = Trainer.build_model(cfg)\n DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume)\n res = Trainer.test(cfg, model)\n if comm.is_main_process():\n verify_results(cfg, res)\n return res\n\n trainer = Trainer(cfg)\n trainer.resume_or_load(resume=args.resume)\n return trainer.train()", "def main(args):\n\n # Compose the model list\n modellist = []\n if args['model']:\n modellist.append(bmark.ModelInfo(args['model'], os.getcwd(), args['classname']))\n\n # Load the benchmark settings\n benchmark = None\n benchmark = bmark.load_benchmark(args['benchmark'])\n corresponding_data = False\n if 'corresponding_data' in benchmark:\n corresponding_data = benchmark['corresponding_data']\n\n # Only extend if not cached\n cache_df = None\n if not args['cache']:\n modellist.extend(benchmark['models'])\n else:\n cache_df = pd.read_csv(args['cache'])\n\n # Extract comparator settings from benchmark description\n eval_comparator = comparator.EqualityComparator()\n if 'comparator' in benchmark:\n if benchmark['comparator'] == 'nvc':\n eval_comparator = comparator.NVCComparator()\n\n # Run the model evaluation\n is_silent = (args['output'] in ['html', 'server'])\n eva = None\n if benchmark['type'] == 'adaption':\n eva = evaluator.AdaptionEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n elif benchmark['type'] == 'coverage':\n # Check for benchmark validity\n if benchmark['data.train'] or benchmark['data.train_person']:\n print('WARNING: Ignoring specified training and train_person data ' \\\n + 'for coverage evaluation...')\n\n eva = evaluator.CoverageEvaluator(\n modellist,\n eval_comparator,\n benchmark['data.test'],\n train_datafile=benchmark['data.train'],\n train_data_person=benchmark['data.train_person'],\n silent=is_silent,\n corresponding_data=corresponding_data,\n domain_encoders=benchmark['domain_encoders'],\n cache_df=cache_df\n )\n else:\n raise ValueError('Unknown benchmark type: {}'.format(benchmark['type']))\n\n with silence_stdout(is_silent):\n res_df = eva.evaluate()\n\n if 'save' in args:\n res_df.to_csv(args['save'], index=False)\n\n # Run the metric visualizer\n htmlcrtr = html_creator.HTMLCreator([\n viz_plot.AccuracyVisualizer(),\n viz_plot.BoxplotVisualizer(),\n 
viz_plot.TableVisualizer()\n ])\n\n # Prepare the benchmark output information and visualize the evaluation results\n benchmark_info = {\n 'name': os.path.basename(args['benchmark']),\n 'data.train': os.path.basename(\n benchmark['data.train']) if benchmark['data.train'] else '',\n 'data.train_person': os.path.basename(\n benchmark['data.train_person']) if benchmark['data.train_person'] else '',\n 'data.test': os.path.basename(benchmark['data.test']),\n 'type': benchmark['type'],\n 'corresponding_data': benchmark['corresponding_data'],\n 'domains': list(res_df['domain'].unique()),\n 'response_types': list(res_df['response_type'].unique()),\n }\n\n if args['output'] == 'browser':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n server.load_in_default_browser(html.encode('utf8'))\n elif args['output'] == 'server':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=True)\n sys.stdout.buffer.write(html.encode('utf-8'))\n elif args['output'] == 'html':\n html = htmlcrtr.to_html(res_df, benchmark_info, embedded=False)\n print(html)", "def main(args):\n\n from logutils import setupLogging\n\n # Handle command line argument:\n parser = argparse.ArgumentParser(description='Neural rl agent.')\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbosity\", default=0, action=\"count\",\n help=\"Verbosity. Invoke many times for higher verbosity\")\n parser.add_argument(\"-g\", '--game-name', dest=\"game_name\", default=None,\n help='Name of the game')\n parser.add_argument('-b', '--batch-size', dest=\"batch_size\", type=int, default=TestingNeuralAgent.DefaultBatchSize,\n help='Batch size (default: %(default)s)')\n parser.add_argument('-e', '--experiment-directory', dest=\"experiment_directory\", type=str, required=True,\n help='Directory where experiment details were saved')\n parser.add_argument('-t', '--test-epsilon', dest=\"testing_epsilon\", type=float, default=TestingNeuralAgent.DefaultTestingEpsilon,\n help='Epsilon to use during testing (default: %(default)s)') \n parser.add_argument(\"-p\", '--pause', dest=\"pause\", type=float, default=TestingNeuralAgent.DefaultPauseTime,\n help='Amount of time to pause display while testing. 
(default: %(default)s)')\n parser.add_argument(\"-hl\", '--history-length', dest=\"history_length\", type=int, default=TestingNeuralAgent.DefaultHistoryLength,\n help='History length (default: %(default)s)')\n parser.add_argument('--no-video', dest=\"video\", default=True, action=\"store_false\",\n help='Do not make a \"video\" record of the best run in each game') \n parser.add_argument('--no-records', dest=\"recording\", default=True, action=\"store_false\",\n help='Do not record anything about the experiment (best games, epoch networks, test results, etc)')\n\n\n # ignore unknowns\n parameters, _ = parser.parse_known_args(args)\n\n setupLogging(parameters.verbosity)\n\n if not parameters.recording:\n best_video = learning_log = False\n else:\n best_video = parameters.video\n learning_log = True\n\n AgentLoader.loadAgent(TestingNeuralAgent(parameters.game_name,\n batch_size=parameters.batch_size,\n experiment_directory=parameters.experiment_directory,\n testing_epsilon=parameters.testing_epsilon,\n pause=parameters.pause,\n history_length=parameters.history_length,\n best_video=best_video,\n learning_log=learning_log))", "def parse_arguments(args_to_parse):\r\n desc = 'Pytorch implementation and evaluation of flexible EHR embedding.'\r\n parser = argparse.ArgumentParser(\r\n description=desc, formatter_class=FormatterNoDuplicate)\r\n\r\n # General options\r\n general = parser.add_argument_group('General options')\r\n general.add_argument('name',\r\n type=str,\r\n help='Name of the model for storing and loading.')\r\n general.add_argument('-r', '--results',\r\n type=str, default='results',\r\n help='Directory to store results.')\r\n general.add_argument('--p-bar',\r\n action='store_true', default=True,\r\n help='Show progress bar.')\r\n general.add_argument('--cuda',\r\n action='store_true', default=True,\r\n help='Whether to use CUDA training.')\r\n general.add_argument('-s', '--seed',\r\n type=int, default=0,\r\n help='Random seed. 
`None` for stochastic behavior.')\r\n\r\n # Learning options\r\n training = parser.add_argument_group('Training options')\r\n training.add_argument('data',\r\n type=str,\r\n help='Path to data directory')\r\n training.add_argument('-e', '--epochs',\r\n type=int, default=20,\r\n help='Maximum number of epochs.')\r\n training.add_argument('-bs',\r\n type=int, default=128,\r\n help='Batch size for training.')\r\n training.add_argument('--lr',\r\n type=float, default=5e-4,\r\n help='Learning rate.')\r\n training.add_argument('--early-stopping',\r\n type=int, default=5,\r\n help='Epochs before early stopping.')\r\n\r\n # Model options\r\n model = parser.add_argument_group('Model specfic options')\r\n model.add_argument('-m', '--model-type',\r\n default='Mortality', choices=MODELS,\r\n help='Type of decoder to use.')\r\n model.add_argument('-t', '--t_hours',\r\n type=int, default=48,\r\n help='ICU data time length.')\r\n model.add_argument('-n', '--n_bins',\r\n type=int, default=20,\r\n help='Number of bins per continuous variable.')\r\n model.add_argument('--dt',\r\n type=float, default=1.0,\r\n help='Time increment between sequence steps.')\r\n model.add_argument('-z', '--latent-dim',\r\n type=int, default=32,\r\n help='Dimension of the token embedding.')\r\n model.add_argument('-H', '--hidden-dim',\r\n type=int, default=256,\r\n help='Dimension of the LSTM hidden state.')\r\n model.add_argument('-p', '--p-dropout',\r\n type=float, default=0.0,\r\n help='Embedding dropout rate.')\r\n model.add_argument('-w', '--weighted',\r\n type=bool, default=True,\r\n help='Whether to weight embeddings.')\r\n model.add_argument('-D', '--dynamic',\r\n type=bool, default=True,\r\n help='Whether to perform dynamic prediction.')\r\n\r\n # Evaluation options\r\n evaluation = parser.add_argument_group('Evaluation options')\r\n evaluation.add_argument('--eval',\r\n action='store_true', default=False,\r\n help='Whether to evaluate using pretrained model.')\r\n evaluation.add_argument('--test',\r\n action='store_true', default=True,\r\n help='Whether to compute test losses.')\r\n\r\n args = parser.parse_args(args_to_parse)\r\n\r\n return args", "def forward(opt):\n my_utils.plant_seeds(randomized_seed=opt.randomize)\n os.makedirs(opt.output_dir, exist_ok=True)\n\n trainer = t.Trainer(opt)\n trainer.build_dataset_train_for_matching()\n trainer.build_dataset_test_for_matching()\n trainer.build_network()\n trainer.build_losses()\n trainer.network.eval()\n\n if opt.eval_list and os.path.isfile(opt.eval_list):\n source_target_files = np.loadtxt(opt.eval_list, dtype=str)\n source_target_files = source_target_files.tolist()\n for i, st in enumerate(source_target_files):\n source, target = st\n cat1, fname1 = source.split('/')\n fname1 = os.path.splitext(fname1)[0]\n cat2, fname2 = target.split('/')\n fname2 = os.path.splitext(fname2)[0]\n if len(opt.shapenetv1_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv1_path, cat1, fname1, \"model.obj\"), os.path.join(opt.shapenetv1_path, cat2, fname2, \"model.obj\"))\n elif len(opt.shapenetv2_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv2_path, cat1, fname1, \"models\", \"model_normalized.obj\"), os.path.join(opt.shapenetv2_path, cat2, fname2, \"models\", \"model_normalized.obj\"))\n elif (opt.eval_source != \"\" and opt.eval_source[-4:] == \".txt\") and (opt.eval_target != \"\" and opt.eval_target[-4:] == \".txt\"):\n source_target_files = [(figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_source), 
figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_target))]\n\n rot_mat = get_3D_rot_matrix(1, np.pi/2)\n rot_mat_rev = get_3D_rot_matrix(1, -np.pi/2)\n isV2 = len(opt.shapenetv2_path) > 0\n for i, source_target in enumerate(source_target_files):\n basename = get_model_id(source_target[0], isV2) + \"-\" + get_model_id(source_target[1], isV2)\n path_deformed = os.path.join(opt.output_dir, basename + \"-Sab.ply\")\n path_source = os.path.join(opt.output_dir, basename + \"-Sa.ply\")\n path_target = os.path.join(opt.output_dir, basename +\"-Sb.ply\")\n\n mesh_path = source_target[0]\n print(mesh_path)\n source_mesh_edge = get_shapenet_model.link(mesh_path)\n\n mesh_path = source_target[1]\n target_mesh_edge = get_shapenet_model.link(mesh_path)\n\n\n print(\"Deforming source in target\")\n\n source = source_mesh_edge.vertices\n target = target_mesh_edge.vertices\n\n pymesh.save_mesh_raw(path_source, source, source_mesh_edge.faces, ascii=True)\n pymesh.save_mesh_raw(path_target, target, target_mesh_edge.faces, ascii=True)\n\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat)\n target = target.dot(rot_mat)\n\n source = torch.from_numpy(source).cuda().float().unsqueeze(0)\n target = torch.from_numpy(target).cuda().float().unsqueeze(0)\n\n with torch.no_grad():\n source, _, _, _, _ = loss.forward_chamfer(trainer.network, source, target, local_fix=None,\n distChamfer=trainer.distChamfer)\n\n try:\n source = source.squeeze().cpu().detach().numpy()\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat_rev)\n P2_P1_mesh = pymesh.form_mesh(vertices=source, faces=source_mesh_edge.faces)\n pymesh.save_mesh(path_deformed, P2_P1_mesh, ascii=True)\n\n # print(\"computing signal tranfer form source to target\")\n # high_frequencies.high_frequency_propagation(path_source, path_deformed, path_target)\n except Exception as e:\n print(e)\n import pdb; pdb.set_trace()\n path_deformed = path_deformed[:-4] + \".pts\"\n save_pts(path_deformed, source.squeeze().cpu().detach().numpy())", "def main(unused_argv):\n model_params = sketch_rnn_model.get_default_hparams()\n if FLAGS.hparams:\n model_params.parse(FLAGS.hparams)\n trainer(model_params)", "def run_modeling(path, path_to_rosetta, target, nsrtuct):\n\n path_to_rosetta_script = path_to_rosetta + 'main/source/bin/rosetta_scripts.default.linuxgccrelease'\n path_to_rosetta_database = path_to_rosetta + 'main/database'\n path_to_target = path + target + '.fasta'\n path_to_protocol = path + 'Modeling/hybridize.xml'\n subprocess.run([path_to_rosetta_script,\n '-database', path_to_rosetta_database,\n '-in:file:fasta', path_to_target,\n '-parser:protocol', path_to_protocol,\n '-default_max_cycles', '200',\n '-dualspace',\n '-nstruct', nsrtuct,\n '-restore_talaris_behavior',\n '-score:set_weights', 'pro_close', '0', 'cart_bonded', '0.5'])\n for i in range(1, int(nsrtuct) + 1):\n name = 'modeled_' + target + '_' + str(i) + '.pdb'\n os.rename('S_000' + str(i) + '.pdb', name)\n shutil.move(name, path + 'Modeling/final_models/')", "def run_vorpaline(self, input_file, *options):\n\n self._input_file = input_file\n args = list(options) + [self._input_file, 'out.meshb']\n self._run_command(\"vorpalite\", args)", "def main(_):\n if FLAGS.decode != True:\n train()\n else:\n decode()", "def main() -> None:\n pl.seed_everything(\n seed=1,\n workers=True,\n )\n\n config = read_config(path=Path(\"configs/blobs.yml\"))\n\n datamodule = BlobsDataModule(config=config)\n model = BlobsClassifierModel(config=config)\n\n trainer = 
get_blobs_trainer_with_callbacks(config=config)\n\n trainer.fit(\n model=model,\n datamodule=datamodule,\n )\n print(\"Best checkpoint path:\", trainer.checkpoint_callback.best_model_path)\n\n trainer.test(\n model=model,\n datamodule=datamodule,\n ckpt_path=\"best\",\n verbose=True,\n )\n\n predictions_probabilities = trainer.predict(\n model=model,\n datamodule=datamodule,\n return_predictions=True,\n ckpt_path=\"best\",\n )\n print(predictions_probabilities)", "def train(args):\n print(args)\n\n # Run a training job\n configs = LuxMatchConfigs_Default\n\n # Create a default opponent agent\n opponent = Agent()\n\n # Create a RL agent in training mode\n player = AgentPolicy(mode=\"train\")\n\n # Train the model\n env_eval = None\n if args.n_envs == 1:\n env = LuxEnvironment(configs=configs,\n learning_agent=player,\n opponent_agent=opponent)\n else:\n env = SubprocVecEnv([make_env(LuxEnvironment(configs=configs,\n learning_agent=AgentPolicy(mode=\"train\"),\n opponent_agent=opponent), i) for i in range(args.n_envs)])\n \n run_id = args.id\n print(\"Run id %s\" % run_id)\n\n if args.path:\n # by default previous model params are used (lr, batch size, gamma...)\n model = PPO.load(args.path)\n model.set_env(env=env)\n\n # Update the learning rate\n model.lr_schedule = get_schedule_fn(args.learning_rate)\n\n # TODO: Update other training parameters\n else:\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate=args.learning_rate,\n gamma=args.gamma,\n gae_lambda=args.gae_lambda,\n batch_size=args.batch_size,\n n_steps=args.n_steps\n )\n\n \n \n callbacks = []\n\n # Save a checkpoint and 5 match replay files every 100K steps\n player_replay = AgentPolicy(mode=\"inference\", model=model)\n callbacks.append(\n SaveReplayAndModelCallback(\n save_freq=100000,\n save_path='./models/',\n name_prefix=f'model{run_id}',\n replay_env=LuxEnvironment(\n configs=configs,\n learning_agent=player_replay,\n opponent_agent=Agent()\n ),\n replay_num_episodes=5\n )\n )\n \n # Since reward metrics don't work for multi-environment setups, we add an evaluation logger\n # for metrics.\n if args.n_envs > 1:\n # An evaluation environment is needed to measure multi-env setups. 
Use a fixed 4 envs.\n env_eval = SubprocVecEnv([make_env(LuxEnvironment(configs=configs,\n learning_agent=AgentPolicy(mode=\"train\"),\n opponent_agent=opponent), i) for i in range(4)])\n\n callbacks.append(\n EvalCallback(env_eval, best_model_save_path=f'./logs_{run_id}/',\n log_path=f'./logs_{run_id}/',\n eval_freq=args.n_steps*2, # Run it every 2 training iterations\n n_eval_episodes=30, # Run 30 games\n deterministic=False, render=False)\n )\n\n print(\"Training model...\")\n model.learn(total_timesteps=args.step_count,\n callback=callbacks)\n if not os.path.exists(f'models/rl_model_{run_id}_{args.step_count}_steps.zip'):\n model.save(path=f'models/rl_model_{run_id}_{args.step_count}_steps.zip')\n print(\"Done training model.\")\n\n # Inference the model\n print(\"Inference model policy with rendering...\")\n saves = glob.glob(f'models/rl_model_{run_id}_*_steps.zip')\n latest_save = sorted(saves, key=lambda x: int(x.split('_')[-2]), reverse=True)[0]\n model.load(path=latest_save)\n obs = env.reset()\n for i in range(600):\n action_code, _states = model.predict(obs, deterministic=True)\n obs, rewards, done, info = env.step(action_code)\n if i % 5 == 0:\n print(\"Turn %i\" % i)\n env.render()\n\n if done:\n print(\"Episode done, resetting.\")\n obs = env.reset()\n print(\"Done\")\n\n '''\n # Learn with self-play against the learned model as an opponent now\n print(\"Training model with self-play against last version of model...\")\n player = AgentPolicy(mode=\"train\")\n opponent = AgentPolicy(mode=\"inference\", model=model)\n env = LuxEnvironment(configs, player, opponent)\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate = 0.0003,\n gamma=0.999,\n gae_lambda = 0.95\n )\n model.learn(total_timesteps=2000)\n env.close()\n print(\"Done\")\n '''", "def run_test(**kwargs):\n cmd = 'python yolov3/test.py'\n pms_list = [\n 'batch_size', 'model_def',\n 'data_config', 'weights_path',\n 'class_path', 'iou_thres',\n 'nms_thres', 'conf_thres',\n 'n_cpu', 'img_size'\n ]\n call_command(pms_list, cmd, kwargs)", "def glm_launch_cli(level, model, glm_config_file, test_one, submit, debug):\n from .glm_launch import glm_launch\n glm_launch(glm_config_file=glm_config_file, level=level, \n model=model, test_one=test_one, \n submit=submit, debug=debug)", "def main():\n args = parse_command_line()\n expt_config = load_config(args.experiment_config_path)\n run_cli(RunOptions.from_dict(expt_config))", "def main():\r\n\r\n # Command-line arguments\r\n training_data = argv[1]\r\n hypothesis_out = argv[2]\r\n learning_type = argv[3]\r\n test = argv[4]\r\n labels = None\r\n if len(argv) > 5:\r\n labels = argv[5]\r\n\r\n # Parse data and determine features\r\n feat_obj = FeatureParser(training_data)\r\n data = FeatureData(feat_obj.features)\r\n\r\n # Train model using DT or DT + adaboost\r\n train(data, hypothesis_out, learning_type)\r\n\r\n # Predict on test set with trained model\r\n predictions = predict(hypothesis_out, test, learning_type)\r\n\r\n # Evaluate accuracy of test data if provided lables\r\n if labels:\r\n accuracy = evaluate(predictions, labels)\r\n print('Model accuracy on test data:',str(accuracy) + '%')" ]
[ "0.6514513", "0.61850375", "0.5984537", "0.59499717", "0.5898596", "0.5883336", "0.5870205", "0.5862893", "0.57968825", "0.5790698", "0.5787918", "0.5760887", "0.57562155", "0.5749064", "0.57390314", "0.57390314", "0.5728084", "0.5717244", "0.57032615", "0.569978", "0.5696714", "0.5678651", "0.56771415", "0.5657772", "0.5642329", "0.5638586", "0.5628816", "0.56284744", "0.5627556", "0.5602024", "0.55934715", "0.5584413", "0.5575655", "0.5574289", "0.55738664", "0.556951", "0.5561261", "0.55580944", "0.5557064", "0.5545743", "0.5538579", "0.5535086", "0.5532913", "0.55306137", "0.5526438", "0.55207974", "0.5517383", "0.5516315", "0.55082434", "0.54952985", "0.54940176", "0.54883146", "0.54847443", "0.54800206", "0.54757386", "0.5475426", "0.54463506", "0.5431662", "0.5429657", "0.5424941", "0.54219246", "0.5399411", "0.5392463", "0.53891474", "0.53844154", "0.5382302", "0.53805345", "0.53758603", "0.5374492", "0.5373419", "0.5371842", "0.53706425", "0.536586", "0.5360263", "0.5355082", "0.53548735", "0.5343141", "0.5342199", "0.53251594", "0.53203726", "0.5317505", "0.53082186", "0.5304977", "0.5302179", "0.5301109", "0.52970785", "0.52964187", "0.52936953", "0.5292355", "0.52911973", "0.5289504", "0.5288117", "0.52869177", "0.52852225", "0.5282936", "0.52767503", "0.52695197", "0.5269264", "0.5259268", "0.52583045", "0.52565116" ]
0.0
-1
Like `Flask.app.route` but takes only a function that returns HtmlSanitizedStr
def safe_route(app: Flask, rule, **options) -> Callable[[RouteFunction], None]:
    original_decorator = app.route(rule, **options)

    def decorator(fn: RouteFunction):
        return original_decorator(compose(str, fn))  # type: ignore

    return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_html(func):\n\n cleaner = re.compile(\"<.*?>\")\n def new_func(*args, strip_html=False, **kwargs):\n name = func(*args, **kwargs)\n if strip_html:\n if isinstance(name, str):\n return html.unescape(re.sub(cleaner, \"\", name))\n elif isinstance(name, list) or isinstance(name, tuple):\n return type(name)([html.unescape(re.sub(cleaner, \"\", n)) for n in name])\n else:\n return name\n new_func.__name__ = func.__name__\n new_func.__doc__ = func.__doc__\n return new_func", "def html(input):\n output=atpic.cleaner_alex.clean(input)\n return output", "def sanitize(text):\n try:\n from airy.core import sanitizer\n return smart_unicode(sanitizer.clean_html(text))\n except ImportError:\n logging.error(\"You need html5lib in order to use sanitize\")\n return \"ERROR: You need html5lib in order to use sanitize\"", "def rest2html(s):\n return core.publish_string(s, writer=html_fragment_writer)", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def make_url_safe(self, url):\n return url.replace(' ', '%20')\\\n .replace('(', '%28')\\\n .replace(')', '%29')\\\n .replace('\"', '%22')", "def raw():\n return redirect(url_for('render.render', raw='true', **request.args))", "def wsgiapp():\n def decorator(func):\n def wsgiapp_wrapper(*args):\n # we get 3 args when this is a method, two when it is\n # a function :(\n if len(args) == 3:\n environ = args[1]\n start_response = args[2]\n args = [args[0]]\n else:\n environ, start_response = args\n args = []\n def application(environ, start_response):\n form = request.parse_formvars(environ,\n include_get_vars=True)\n status = '200 OK'\n form['environ'] = environ\n try:\n res = func(*args, **form.mixed())\n except ValueError, ve:\n status = '500 Server Error'\n res = '<html>There was an error: %s</html>' % \\\n html_quote(ve)\n start_response(status, [('content-type', 'text/html')])\n return [res]\n app = simplecatcher(application)\n return app(environ, start_response)\n wsgiapp_wrapper.exposed = True\n return wsgiapp_wrapper\n return decorator", "def sanitize(string):\n from html5lib import parseFragment, serialize\n\n parsed = parseFragment(string)\n clean = serialize(parsed, sanitize=True, omit_optional_tags=False,\n quote_attr_values='always')\n return clean", "def form(s):\r\n \r\n # removes leading and trailing apostrophe's from string\r\n s = s.strip(\"'\")\r\n \r\n # converts HTML hex back to characters\r\n s = s.replace(\"&#39;\", \"'\")\r\n s = s.replace(\"&#8217;\", \"’\")\r\n s = s.replace(\"&#8216;\", '\"')\r\n s = s.replace(\"&#8221;\", \"'\")\r\n s = s.replace(\"&#8220;\", \"'\")\r\n \r\n # success\r\n return s", "def wsgiapp(self):\n def wrapped(environ, start_response):\n \"\"\"wsgi application function\"\"\"\n start_time = time.clock()\n req = Request(environ)\n res = Responder(start_response, environ, self.mylookup, start_time)\n \n \n found_matches = None\n route = {}\n for reg, route in self.routes:\n found_matches = re.match(route['regex'], req.path)\n if found_matches and meetsreqs(req, route['reqs']):\n break\n else:\n return ''\n bindings = route['kwargs']\n for part in route['parts']:\n if len(part) == 2:\n bindings[part[0]] = part[1]\n for part in xrange(len(found_matches.groups())):\n if 
found_matches.group(part+1):\n partname = route['parts'][part][0]\n bindings[partname] = found_matches.group(part+1)\n\n return str(route['function'](res, **dict(bindings)))\n\n return wrapped", "def _on_raw(func_name):\n\n def wrapped(self, *args, **kwargs):\n args = list(args)\n try:\n string = args.pop(0)\n if hasattr(string, \"_raw_string\"):\n args.insert(0, string.raw())\n else:\n args.insert(0, string)\n except IndexError:\n # just skip out if there are no more strings\n pass\n result = getattr(self._raw_string, func_name)(*args, **kwargs)\n if isinstance(result, str):\n return ANSIString(result, decoded=True)\n return result\n\n return wrapped", "def make_html_safe(s):\n return s.replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")", "def any_string_method(request):\n return request.param", "def safeHTML(s):\n parser = StrippingParser()\n parser.feed(s)\n parser.close()\n parser.cleanup()\n return parser.result", "def csrf_protection(fn):\n def protected(*args):\n if 'X-Requested-With' in request.headers:\n return fn(*args)\n else:\n return \"X-Requested-With header missing\", HTTPStatus.FORBIDDEN\n return protected", "def htmlstr(self, unsafe) :\n\t\tunsafe = string.replace(unsafe, '&', '&amp;')\n\t\tunsafe = string.replace(unsafe, '<', '&lt;')\n\t\treturn string.replace(unsafe, '>', '&gt;')", "def _sanitize_function(self, func_msg):\n if func_msg is not None:\n func = str(func_msg)\n else:\n func = None\n return func", "def escape(input):\n # first correct the HTML\n output=html(input)\n # print \"HTML is: %s\" % output\n # then escape it\n output=atpic.cleaner_escape.escape(output)\n # print \"ESCAPD is: %s\" % output\n return output", "def urlify_pythonic(text, length):\n return text.rstrip().replace(\" \", \"%20\")", "def sanitize(cls):", "def html_tag(string, input_id, proc):\n return html_simple_element(\n string, \"a\", 'id=\"' + proc + \"_\" + normalise_tag_id(input_id) + '\"'\n )", "def get_html_string(self, **kwargs):\n ...", "def stringfilter(func):\n @wraps(func)\n def _dec(*args, **kwargs):\n if args:\n args = list(args)\n args[0] = str(args[0])\n return func(*args, **kwargs)\n\n return _dec", "def urlify(w, length):\n return w.strip().replace(' ', '%20')", "def assert_clean(data):\n def _ensure_clean(value):\n if value != bleach.clean(value):\n raise ValueError\n\n return escape_html(data)", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def hello():\n return \"<h1 style='color:blue'>Hello There, Gainzzzasasas!</h1>\"", "def sanitize(sensitive_thing):\n sanitized_string = sensitive_thing\n length = len(sensitive_thing)\n if sensitive_thing:\n if \"http\" in sensitive_thing:\n # Split the URL – expecting a Slack (or 
other) webhook\n sensitive_thing = sensitive_thing.split(\"/\")\n # Get just the last part for sanitization\n webhook_tail = \"\".join(sensitive_thing[-1:])\n length = len(webhook_tail)\n # Construct a sanitized string\n sanitized_string = (\n \"/\".join(sensitive_thing[:-1])\n + \"/\"\n + webhook_tail[0:4]\n + \"\\u2717\" * (length - 8)\n + webhook_tail[length - 5 : length - 1]\n )\n # Handle anything else that's long enough to be a key\n elif length > 15:\n sanitized_string = sensitive_thing[0:4] + \"\\u2717\" * (length - 8) + sensitive_thing[length - 5 : length - 1]\n return sanitized_string", "def run_html():\n if __name__ != \"__main__\":\n app.run(debug=True)", "def tweet_sanitize(tweet: str) -> str:\n pipeline = [strip_links, strip_mentions, strip_hashtags, strip_all_entities,\n remove_special_characters]\n for fun in pipeline:\n tweet = fun(tweet)\n return tweet", "def urlify(board):\n return(board.replace(\" \",\"%20\"))", "def method(name, doc):\n import html\n\n params = method_params(doc)\n doc = html.escape(doc)\n return string.Template(METHOD_TEMPLATE).substitute(\n name=name, params=params, doc=doc\n )", "def convert_html():\n return", "def test_themes_escape_html():\n app = create_ctfd()\n with app.app_context():\n user = gen_user(app.db, name=\"<script>alert(1)</script>\")\n user.affiliation = \"<script>alert(1)</script>\"\n user.website = \"<script>alert(1)</script>\"\n user.country = \"<script>alert(1)</script>\"\n\n with app.test_client() as client:\n r = client.get(\"/users\")\n assert r.status_code == 200\n assert \"<script>alert(1)</script>\" not in r.get_data(as_text=True)\n destroy_ctfd(app)", "def wikihtml(func):\r\n\r\n def wrapper(*args):\r\n\r\n title, summary, url = func(*args)\r\n\r\n if title is nan:\r\n title = \"\"\r\n\r\n if summary is nan:\r\n summary = \"No summary available on Wikipedia.\"\r\n\r\n html = textwrap.dedent(\"\"\"\r\n <br>\r\n <p>\r\n {summary}\r\n </p>\r\n <br>\r\n \"\"\".format(summary=summary))\r\n\r\n return {\"title\": title, \"text\": html, \"url\": url}\r\n\r\n return wrapper", "def secure_page():\n return render_template('secure_page.html')", "def strip_html(unclean):\n # We make this noop for non-string, non-collection inputs so this function can be used with higher-order\n # functions, such as rapply (recursively applies a function to collections)\n if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:\n return unclean\n return bleach.clean(unclean, strip=True, tags=[], attributes=[], styles=[])", "def gateway(arg):\n\tassert isinstance(arg, str)\n\treturn r\"(?P<%s>[\\w_\\-@\\' \\.]+)\" % (arg,)", "def html_filter(val):\n if isinstance(val, Undefined):\n return UNDEFINED_LABEL\n return html_escape(val)", "def callable_(arg: str) -> str:\n return '! %r !' 
% arg", "def url():\n ...", "def _render_tag(self, tag, query_str):\n t = Template('{%% load djblets_utils %%}'\n '{%% autoescape off %%}%s{%% endautoescape %%}'\n % tag)\n\n request = HttpRequest()\n\n if query_str:\n request.GET = QueryDict(query_str)\n\n return t.render(Context({\n 'request': request,\n })).replace('&amp;', '&')", "def renderHTTP(req):", "def strip_html(unclean, tags=[]):\n # We make this noop for non-string, non-collection inputs so this function can be used with higher-order\n # functions, such as rapply (recursively applies a function to collections)\n if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:\n return unclean\n return bleach.clean(unclean, strip=True, tags=tags, attributes=[], styles=[])", "def index():\n\n page = \"\"\"\n <h1>Calculator</h1>\n <div>Directions. This app will calculate 2 or more numbers provided in the url string. To use:\n <ol>\n <li>Type in http://localhost:8080/</li>\n <li>Type in the arithmetic operation (add, subract, multiply, divide) followed by /</li>\n <li>Type in numbers. Between each number include a /</li>\n <li>For example, http://localhost:8080/add/5/10/</li>\n </ol></div>\n <h2>Tests:</h2><ul>\n <li><a href=\"http://localhost:8080/add/5/10/15\">Addition</a></li>\n <li><a href=\"http://localhost:8080/subtract/100/50/25\">Subraction</a></li>\n <li><a href=\"http://localhost:8080/multiply/5/10/15\">Multiplication</a></li>\n <li><a href=\"http://localhost:8080/divide/100/50\">Division</a></li>\n \"\"\"\n return page", "def render_string(_str):\n\t\treturn str.encode(_str)", "def simplecatcher(application):\n def simplecatcher_app(environ, start_response):\n try:\n return application(environ, start_response)\n except:\n out = StringIO()\n traceback.print_exc(file=out)\n start_response('500 Server Error',\n [('content-type', 'text/html')],\n sys.exc_info())\n res = out.getvalue()\n return ['<h3>Error</h3><pre>%s</pre>'\n % html_quote(res)]\n return simplecatcher_app", "def html_link_to_tag(string, input_id, proc):\n return html_simple_element(\n string, \"a\", 'href=\"#' + proc + \"_\" + normalise_tag_id(input_id) + '\"'\n )", "def hello(environ, start_response):\n # get the name from the url if it was specified there.\n args = environ['myapp.url_args']\n if args:\n subject = escape(args[0])\n else:\n subject = 'World'\n start_response('200 OK', [('Content-Type', 'text/html')])\n return [str.encode('''Hello %(subject)s\n Hello %(subject)s!\n\n''' % {'subject': subject})]", "def handle(req: bytes) -> str:\n\n try:\n pass\n except:\n dirname = os.path.dirname(__file__)\n path = os.path.join(dirname, 'html', 'upload.html')\n\n with (open(path, 'r')) as file:\n html = file.read()\n\n return html", "def xss_strip_unsafe_tags(s):\n return htmlsanitizer._sanitizeHTML(s, 'utf-8', None)", "def welcome():\n\n html = \"\"\"\n <html>\n <body>\n <h1>welcome</h1>\n </body>\n </html> \n \"\"\"\n\n return html", "def escape(cls, html):\n return (\"%s\" % (html)).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def _transform(func_name):\n\n def wrapped(self, *args, **kwargs):\n replacement_string = _query_super(func_name)(self, *args, **kwargs)\n to_string = []\n char_counter = 0\n for index in range(0, len(self._raw_string)):\n if index in self._code_indexes:\n to_string.append(self._raw_string[index])\n elif index in self._char_indexes:\n to_string.append(replacement_string[char_counter])\n char_counter += 1\n return ANSIString(\n 
\"\".join(to_string),\n decoded=True,\n code_indexes=self._code_indexes,\n char_indexes=self._char_indexes,\n clean_string=replacement_string,\n )\n\n return wrapped", "def renderHTTP(self, ctx):\n return url.URL.fromContext(ctx).child('')", "def clean_for_html(cls, value):\r\n return cls._clean(value, INVALID_HTML_CHARS)", "def welcome_home():\n\n html = \"\"\"\n <html>\n <body>\n <h1>welcome home</h1>\n </body>\n </html> \n \"\"\"\n\n return html", "def render_callable(self, inner_template_name, arg_string, outer_args):\r\n # First render the arg_string (mustache doesn't do this for you, and it may itself\r\n # contain mustache constructs).\r\n rendered_arg_string = self.render(arg_string, outer_args)\r\n # Parse the inner args as CGI args.\r\n inner_args = dict([(k, v[0]) for k, v in urlparse.parse_qs(rendered_arg_string).items()])\r\n # Order matters: lets the inner args override the outer args.\r\n args = dict(outer_args.items() + inner_args.items())\r\n # Render.\r\n return self.render_name(inner_template_name, args)", "def plain(app, n, _inTuple=False, _asString=False, explain=False, **options):\n\n return render(app, False, n, _inTuple, _asString, explain, **options)", "def render():\n html = request.get_data().decode('utf-8')\n sio.emit('render', html)\n return 'OK'", "def clean_html_content(field_name):\n\n @check_field_is_empty(field_name)\n def wrapped(self):\n \"\"\"Decorator wrapper method.\n \"\"\"\n from HTMLParser import HTMLParseError\n\n content = self.cleaned_data.get(field_name)\n\n # clean_html_content is called when writing data into GAE rather than\n # when reading data from GAE. This short-circuiting of the sanitizer\n # only affects html authored by developers. The isDeveloper test for\n # example allows developers to add javascript.\n if user_logic.isDeveloper():\n return content\n\n try:\n cleaner = HtmlSanitizer.Cleaner()\n cleaner.string = content\n cleaner.clean()\n except (HTMLParseError, safe_html.IllegalHTML), msg:\n raise forms.ValidationError(msg)\n\n content = cleaner.string\n content = content.strip().replace('\\r\\n', '\\n')\n\n return content\n\n return wrapped", "def part_render(self, attr, *a, **kw):\r\n style = kw.get('style', 'html')\r\n template = self.template(style)\r\n dt = template.get_def(attr)\r\n return unsafe(dt.render(thing = self, *a, **kw))", "def escaped_url(url):\n f = furl.furl(url)\n f.username = None\n f.password = None\n return f.tostr()", "def render_string(self, template: str, **vars) -> str:", "def clean(self, text):\n if not isinstance(text, six.string_types):\n raise TypeError('argument must of text type')\n\n if not text:\n return u''\n\n text = force_unicode(text)\n\n dom = self.parser.parseFragment(text)\n filtered = BleachSanitizerFilter(\n source=self.walker(dom),\n\n # Bleach-sanitizer-specific things\n attributes=self.attributes,\n strip_disallowed_elements=self.strip,\n strip_html_comments=self.strip_comments,\n\n # html5lib-sanitizer things\n allowed_elements=self.tags,\n allowed_css_properties=self.styles,\n allowed_protocols=self.protocols,\n allowed_svg_properties=[],\n )\n\n # Apply any filters after the BleachSanitizerFilter\n for filter_class in self.filters:\n filtered = filter_class(source=filtered)\n\n return self.serializer.render(filtered)", "def welcome_back():\n\n html = \"\"\"\n <html>\n <body>\n <h1>welcome back</h1>\n </body>\n </html> \n \"\"\"\n\n return html", "def application(environ, start_response):\n\n headers = [('Content-type', 'text/html')]\n\n try:\n path = environ.get('PATH_INFO', 
None)\n if path is None:\n raise NameError\n\n func, args = resolve_path(path)\n status = \"200 OK\"\n body = func(*args)\n except NameError:\n status = '404 Not Found'\n body = \"<h1>Not Found</h1>\"\n except Exception:\n status = \"500 Internal Server Error\"\n body = \"<h1>Internal Server Error</h1>\"\n print(traceback.format_exc())\n finally:\n headers.append(('Content-length', str(len(body))))\n start_response(status, headers)\n return [body.encode('utf8')]", "def html_escape(text):\n return escape(text, escape_table)", "def csrf_exempt(view_func):\r\n # We could just do view_func.csrf_exempt = True, but decorators\r\n # are nicer if they don't have side-effects, so we return a new\r\n # function.\r\n def wrapped_view(*args, **kwargs):\r\n return view_func(*args, **kwargs)\r\n wrapped_view.csrf_exempt = True\r\n return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)", "def appstr(app):\n ...", "async def html(\n self, *, encoding: Optional[str] = None, errors: str = \"strict\") -> str:\n return await self._aws_text(encoding=encoding, errors=errors)", "def xml_safe(value):\n return CONTROL_CHARACTERS.sub('?', value)", "def xml_safe(value):\n return CONTROL_CHARACTERS.sub('?', value)", "def _sanitize(opt, value):\n return value if not opt.secret else '*' * 4", "def render_func(raw_str: str) -> str:\n try:\n rendered_str = raw_str.format(**live_context)\n except KeyError as err:\n raise SQLTemplaterError(\n \"Failure in Python templating: {}. Have you configured your \"\n \"variables? https://docs.sqlfluff.com/en/stable/\"\n \"configuration.html#templating-configuration\".format(err)\n )\n return rendered_str", "def html(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponse, *args, **kwargs)", "def sanitize_url(self, url):\r\n if not self.markdown.safeMode:\r\n # Return immediately bipassing parsing.\r\n return url\r\n \r\n try:\r\n scheme, netloc, path, params, query, fragment = url = urlparse(url)\r\n except ValueError:\r\n # Bad url - so bad it couldn't be parsed.\r\n return ''\r\n \r\n locless_schemes = ['', 'mailto', 'news']\r\n if netloc == '' and scheme not in locless_schemes:\r\n # This fails regardless of anything else. \r\n # Return immediately to save additional proccessing\r\n return ''\r\n\r\n for part in url[2:]:\r\n if \":\" in part:\r\n # Not a safe url\r\n return ''\r\n\r\n # Url passes all tests. 
Return url as-is.\r\n return urlunparse(url)", "def display_error():\n return flask.jsonify(flask.request.args)", "def render_str(self, template, **params):\n return render_str(template, **params)", "def web_test(fn):\n def modified_fn(x):\n if not settings.TEST_WITH_WEB:\n logger.warning('Skip test (without web): %s' % fn.__name__)\n else:\n return fn(x)\n\n return modified_fn", "def strip_html(text: str, **serializer_kwargs: bool):\n cleaner = get_cleaner(**serializer_kwargs)\n text = cleaner.clean(text)\n return text", "def test_make_fname_js_safe_no_change():\n\n safe = \"abc\"\n expected = \"abc\"\n\n assert expected == u.make_fname_js_safe(safe)", "def raw_string(seq):\n\n def f(s):\n \"\"\" Filter latex \"\"\"\n r = s.replace('\\\\', '\\\\\\\\').replace('_', '\\_').replace('^', '\\^')\n return r\n\n return [ f(k) for k in seq ]", "def render_str(template, **params):\n t = env.jinja_env.get_template(template)\n return t.render(params)", "def smart_urlquote_wrapper(matched_url):\n try:\n return smart_urlquote(matched_url)\n except ValueError:\n return None", "def recept(self, text, *args, **kwargs):\n return text", "def about():\n return 'about Python Flask'", "def validate(self, template: str, func: Callable):\n raise NotImplementedError", "def rawHTMLrendered(self):", "def apply(self, method, url_template, *args, **kwargs):\n\turl_suffix = url_template\n\treturn self.url_call(method, url_suffix, kwargs).read()", "def index(self):\n return \"\"\"\n<html><body>\n<pre>\n Hello World!\n\n To send a message, please point to:\n http://server:port/msg?msgType=YOURTYPE&payload=YOURPAYLOAD\n</pre>\n</body></html>\n \"\"\"", "def string_ids(f):\n\n\t@functools.wraps(f)\n\tdef wrapper(self, *args):\n\t\treturn f(self, *[str(arg) for arg in args])\n\n\treturn wrapper", "def render_str(template, **params):\n\n template_jinja = jinja_env.get_template(template)\n return template_jinja.render(params)", "def modify_html(html):\n def modify_attr(attr):\n nonlocal html\n html = re.sub(\n r'(?<=' + attr + r'=\")(?!/)',\n '/static/bundle/',\n html\n )\n modify_attr('src')\n modify_attr('href')\n return html", "def webstreaming_func():\n return render_template('webstreaming.html')", "def test_script_tags(self):\n testString = sanitize('<script>Do some bad stuff</script>')\n self.assertEqual(\n testString,\n '&lt;script&gt;Do some bad stuff&lt;/script&gt;'\n )", "def render_str(template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)", "def sanitize_html(input):\n p = HTMLParser(tokenizer=HTMLSanitizer, tree=treebuilders.getTreeBuilder(\"dom\"))\n dom_tree = p.parseFragment(input)\n walker = treewalkers.getTreeWalker(\"dom\")\n stream = walker(dom_tree)\n\n s = HTMLSerializer(omit_optional_tags=False)\n return \"\".join(s.serialize(stream))", "def home():\n return \"Hello, this is the first page <h1>Hey</h1>\"" ]
[ "0.60575014", "0.5710384", "0.54833734", "0.54531986", "0.52721405", "0.52415216", "0.5219625", "0.51628417", "0.51594687", "0.5052998", "0.50139946", "0.5011002", "0.5004929", "0.4984093", "0.49426973", "0.49259076", "0.49226424", "0.48769718", "0.48679265", "0.48450527", "0.48097062", "0.48082182", "0.47947422", "0.47917923", "0.47887138", "0.478794", "0.47780767", "0.47674096", "0.47654408", "0.4764804", "0.47582558", "0.47504336", "0.4749071", "0.47480264", "0.47218618", "0.46916455", "0.46887806", "0.467316", "0.46688566", "0.46659797", "0.4661755", "0.46594018", "0.464903", "0.46456784", "0.46423", "0.46267557", "0.46256545", "0.4614712", "0.46086866", "0.4606692", "0.4604277", "0.46041876", "0.4603872", "0.45967275", "0.4586032", "0.4580413", "0.45789778", "0.45750928", "0.45745873", "0.456915", "0.4567449", "0.45639858", "0.45632824", "0.45627743", "0.45491102", "0.4548228", "0.4539163", "0.45376417", "0.45257616", "0.4524924", "0.45230517", "0.45225024", "0.45184255", "0.45184255", "0.44892925", "0.4487793", "0.4483722", "0.4483102", "0.44825742", "0.44807675", "0.44794556", "0.44706059", "0.44670492", "0.4464974", "0.44588992", "0.44543183", "0.44517863", "0.4447898", "0.44476563", "0.44446495", "0.44422653", "0.44399917", "0.4437521", "0.44367704", "0.44331786", "0.4431049", "0.44304502", "0.4425688", "0.44211948", "0.44200096" ]
0.5955667
1
Loads an observations CSV file.
def load_timeline(filename):
    try:
        # Create an empty timeline
        timeline = ObservationTimeline()
        # Dictionary mapping agent's name to held item
        carrying = {}
        # Read data from input file
        with open(filename, newline='') as csvfile:
            obs = csv.reader(csvfile, delimiter='\n')
            for row in obs:
                # Unpack each row
                col = tuple(row[0].split(','))
                # If too many or too few arguments, Error
                if not len(col) == 4:
                    raise ValueError("Unpacking row error")
                # Adds observation to timeline
                timeline.add(Observation(col[0], col[1], col[2]))
                # If agent is carrying item, add to timeline
                if not col[3] == '':
                    carrying.update({col[0]: col[3]})
        # Return Tuple of carried item dict and ObsTimeline
        return (carrying, timeline)
    except OSError:
        raise OSError("Cannot open file")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadCSV(input_file):", "def read_csv(self, filepath, obs_vars = ['obs'], header = True):\n # determine if the type file is gzip\n filetype, encoding = mimetypes.guess_type(filepath)\n if encoding == 'gzip':\n self.data = pd.read_csv(filepath, compression='gzip')\n else:\n self.data = pd.read_csv(filepath)\n\n self.original_data = copy.deepcopy(self.data)\n if self.cutoff:\n self.data = self.data[:self.cutoff]\n \n self.data = self.data[obs_vars]\n self.N = self.data.shape[0]\n return True", "def read_csv():", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def load(self, path):\n self.df = pd.read_csv(path)\n print(\"Loaded data from {}\".format(path))", "def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def read_csv_file(self):\n pass", "def load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', path))", "def load_annotations(self):\n fname, aux = QFileDialog.getOpenFileName(self, 'Open file', '', \"(*.csv)\")\n if fname != '':\n self.model.AnnotationLoad(fname=fname)", "def load_csv(filename):\n # Open csvfile\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n\n # Put data in gloabal list\n for row in reader:\n # Get data of subject with either or both milk and peanut allergy\n if row[\"MILK_ALG_START\"] != \"NA\" or row[\"PEANUT_ALG_START\"] != \"NA\":\n sub_list = list()\n for key in DATA_KEYS:\n sub_list.append(row[key])\n\n # Add data of subject to all data \n data_list.append(sub_list)", "def _load(self):\n op_type_file_path = os.path.join(\n self._profiling_dir,\n self._csv_file_to_analyse.format(self._device_id)\n )\n op_type_file_path = validate_and_normalize_path(\n op_type_file_path, raise_key=\"Invalid op_type_file_path\")\n if not os.path.isfile(op_type_file_path):\n log.warning('The file <%s> does not exist.', op_type_file_path)\n return\n\n with open(op_type_file_path, 'r') as file:\n csv_reader = csv.reader(file)\n _ = next(csv_reader)\n for info in csv_reader:\n self._data.append(self._convert_field_type(info))", "def read_test_csv(self, file_path, header=True):\n BasePredictor.read_test_csv(self, file_path, header)\n self.obs = np.array(self.obs, dtype=np.int32)\n return", "def load_obs_csv(self, csv_file, date_fmt=\"%Y/%m/%d %H:%M\", mission_lst=None, only_geom=False):\n\n try:\n obs_data = np.loadtxt(csv_file, delimiter=',', dtype='str')\n msg = \"observation 
data loaded from file ***{}***\".format(csv_file)\n FileLogger.info(msg)\n except IOError as exc:\n msg = \"could not load observations from csv file ***{}***\".format(csv_file)\n msg += \" ({})\".format(exc)\n FileLogger.fatal(msg)\n raise RuntimeError(msg)\n\n nt,ncol = obs_data.shape\n date_lst = [ dt.datetime.strptime(obs_data[i,0], date_fmt) for i in xrange(nt) ]\n date_a = np.array(date_lst)\n time_start_data = date_lst[0]\n time_end_data = date_lst[-1]\n #-- logging\n msg = \"detected ntimepts={} #columns={} in csv file\".format(nt, ncol)\n FileLogger.info(msg)\n\n #-- potential adjustment to specified temporal domain\n if self.time_start!=None:\n time_start = self.time_start\n else:\n time_start = time_start_data\n if self.time_end!=None:\n time_end = self.time_end\n else:\n time_end = time_end_data\n\n #-- first 8 columns are always:date, vza, vaa, sza, saa, sat_flag, lat, lon\n\n if ncol==10:\n msg = \"start reading S1 observations...\"\n FileLogger.info(msg)\n # date, vza, vaa, sza, saa, sat_flag, lat, lon, vh, vv\n vh_lst = []\n vv_lst = []\n self.obs_dct['S1'] = ObsTable()\n self.obs_dct['S1'].geom = satgeo.SensorGeometry()\n self.obs_dct['S1'].sat_id_lst = []\n #-- abreviate\n sat_geom = self.obs_dct['S1'].geom\n sat_geom.date_utc = []\n sat_geom.vza = []\n sat_geom.vaa = []\n sat_geom.sza = []\n sat_geom.saa = []\n for i,act_date in enumerate(date_lst):\n if act_date<time_start:\n continue\n elif act_date>time_end:\n break\n #-- actual satellite/mission\n act_mission = obs_data[i,7].upper()\n if mission_lst!=None and not act_mission in mission_lst:\n msg = \"observation at date {} is from mission={} and ignored here.\".format(\n act_date.strftime('%Y-%m-%dT%H:%M'), act_mission)\n FileLogger.info(msg)\n continue\n #-- read actual geometry\n sat_geom.date_utc.append(act_date)\n sat_geom.vza.append( float(obs_data[i,1]) )\n sat_geom.vaa.append( float(obs_data[i,2]) )\n sat_geom.sza.append( float(obs_data[i,3]) )\n sat_geom.saa.append( float(obs_data[i,4]) )\n #-- lon,lat (columns 5,6) not needed\n #-- satellite flag (column 7)\n self.obs_dct['S1'].sat_id_lst.append(act_mission)\n #-- VH,VV in 0-indexed columns 8,9\n vh_lst.append( float(obs_data[i,8]) )\n vv_lst.append( float(obs_data[i,9]) )\n\n #-- geometries/satellite flags are done here\n if only_geom:\n return\n\n #-- turn into arrays\n vh = np.array(vh_lst)\n vv = np.array(vv_lst)\n #-- logging\n msg = \"observational backscatter values are assumed to be in linear units!\"\n FileLogger.info(msg)\n msg = \"VH backscatter values read: VH[linear] min/max={}/{}\".format(\n vh.min(), vh.max())\n FileLogger.info(msg)\n msg = \"VV backscatter values read: VV[linear] min/max={}/{}\".format(\n vv.min(), vv.max())\n FileLogger.info(msg)\n #-- uncertainty computation\n #-- XX_db = XX_db(XX) = 10*log10(XX)\n #-- XX = XX(XX_db) = 10**(XX_db/10)\n #\n # for the uncertainty in linear/raw unit we apply conservative estimation:\n # 2*sXX = [ XX(XX_db+sXX_db) - XX(XX_db-sXX_db) ] (XX=VH,VV)\n # = [ XX(XX_db)*10**(sXX_db/10.) - XX(XX_db)*10**(-sXX_db/10.)]\n # = XX(XX_db)*[10**(sXX_db/10.) - 10**(-sXX_db/10.)]\n # = XX * [10**(sXX_db/10.) - 10**(-sXX_db/10.)]\n ds = 0.5* (10**(self.s1_unc_db/10.) 
- 10**(-1*self.s1_unc_db/10.))\n #-- S1 uncertainty floor *may* be user-supplied\n if self.s1_vv_uncfloor!=None:\n dsvv_floor = self.s1_vv_uncfloor\n else:\n dsvv_floor = 10**(self.s1_floor_db/10.)*ds\n if self.s1_vh_uncfloor!=None:\n dsvh_floor = self.s1_vh_uncfloor\n else:\n dsvh_floor = 10**(self.s1_floor_db/10.)*ds\n msg = \"assuming S1 observational uncertainty of {} [dB] \".format(self.s1_unc_db)\n msg += \"yields relative uncertainty of {} [linear unit].\".format(ds)\n FileLogger.info(msg)\n msg = \"assuming vv={} vh={} S1 observational uncertainty floor [linear unit].\".format(\n dsvv_floor, dsvh_floor)\n FileLogger.info(msg)\n svh = np.maximum(vh*ds, dsvh_floor)\n svv = np.maximum(vv*ds, dsvv_floor)\n #-- apply floor value\n nlo_svh = np.count_nonzero(vh*ds<dsvh_floor)\n nlo_svv = np.count_nonzero(vv*ds<dsvv_floor)\n svh = np.maximum(svh, dsvh_floor)\n svv = np.maximum(svv, dsvv_floor)\n msg = \"number of applied uncertainty floor values on VH={} VV={}\".format(\n nlo_svh, nlo_svv)\n FileLogger.info(msg)\n msg = \"determined VH uncertainty in linear units, min/max={}/{}\".format(\n svh.min(), svh.max())\n FileLogger.info(msg)\n msg = \"determined VV uncertainty in linear units, min/max={}/{}\".format(\n svv.min(), svv.max())\n FileLogger.info(msg)\n #-- potential filtering of polarisations\n if not self.s1_pol is None:\n if not 'VH' in self.s1_pol:\n vh = self.obs_fill_value\n svh = self.obs_fill_value\n if not 'VV' in self.s1_pol:\n vv = self.obs_fill_value\n svv = self.obs_fill_value\n #-- \n nt_use = len(sat_geom.date_utc)\n self.obs_dct['S1'].data = np.empty((nt_use,2), dtype=np.float64) #-- 'VH','VV'\n self.obs_dct['S1'].data[:,0] = vh\n self.obs_dct['S1'].data[:,1] = vv\n self.obs_dct['S1'].dataunc = np.empty((nt_use,2), dtype=np.float64)\n self.obs_dct['S1'].dataunc[:,0] = svh\n self.obs_dct['S1'].dataunc[:,1] = svv\n #-- logging\n msg = \"...reading S1 observations DONE\"\n FileLogger.info(msg)\n else:\n #-- logging\n msg = \"start reading S2 observations...\"\n FileLogger.info(msg)\n # date, vza, vaa, sza, saa, sat_flag, lat, lon, BRF1,...,BRF13\n self.obs_dct['S2'] = ObsTable()\n self.obs_dct['S2'].geom = satgeo.SensorGeometry()\n self.obs_dct['S2'].sat_id_lst = []\n #-- abreviate\n sat_geom = self.obs_dct['S2'].geom\n sat_geom.date_utc = []\n sat_geom.vza = []\n sat_geom.vaa = []\n sat_geom.sza = []\n sat_geom.saa = []\n brf_lst = [ [] for i in xrange(NB_S2) ] #-- prepare lists for 13 BRF bands\n for i,act_date in enumerate(date_lst):\n if act_date<time_start:\n continue\n elif act_date>time_end:\n break\n #-- actual satellite/mission\n act_mission = obs_data[i,7].upper()\n if mission_lst!=None and not act_mission in mission_lst:\n msg = \"observation at date {} is from mission={} and ignored here.\".format(\n act_date.strftime('%Y-%m-%dT%H:%M'), act_mission)\n FileLogger.info(msg)\n continue\n #-- read actual geometry\n sat_geom.date_utc.append(act_date)\n sat_geom.vza.append( float(obs_data[i,1]) )\n sat_geom.vaa.append( float(obs_data[i,2]) )\n sat_geom.sza.append( float(obs_data[i,3]) )\n sat_geom.saa.append( float(obs_data[i,4]) )\n #-- lon/lat in columns 5, 6 not used here\n #-- satellite flag\n self.obs_dct['S2'].sat_id_lst.append(obs_data[i,7])\n #-- BRFs start at 0-indexed column 8 in data csv file\n for ib in xrange(NB_S2):\n icol = ib+8\n brf_lst[ib].append( float(obs_data[i, icol]) )\n\n #-- geometries/satellite flags are done here\n if only_geom:\n return\n #--\n nt_use = len(sat_geom.date_utc)\n brf_data = np.empty((nt_use,NB_S2), dtype=np.float64) #-- 
BRF1-13\n for ib in xrange(NB_S2):\n brf_data[:,ib] = np.array(brf_lst[ib])\n #-- check observational consistency\n nneg = np.count_nonzero( brf_data<0 )\n if nneg>0:\n msg = \"detected negative BRF values: nneg={}.\".format(nneg)\n msg += \" These will be set to fill-value!\"\n FileLogger.warn(msg)\n brf_data[ brf_data<0 ] = self.obs_fill_value\n nhi = np.count_nonzero( brf_data>1 )\n if nhi>0:\n msg = \"detected high BRF outlier values>1: nout={}.\".format(nhi)\n msg += \" These will be set to fill-value!\"\n FileLogger.warn(msg)\n brf_data[ brf_data>1 ] = self.obs_fill_value\n\n #-- data uncertainty\n msg = \"BRF uncertainty is derived by applying {} relative uncertainty, \".format(\n self.s2_relunc)\n msg += \"and an uncertainty floor value of {}\".format(self.s2_uncfloor)\n FileLogger.info(msg)\n brf_dataunc = np.maximum(brf_data*self.s2_relunc, self.s2_uncfloor)\n brf_dataunc[ brf_dataunc<0 ] = self.obs_fill_value\n brf_dataunc[ brf_data==self.obs_fill_value ] = self.obs_fill_value\n #-- restriction to seleted bands\n if not self.s2_bnds is None:\n bnd_msk = np.ones((NB_S2,), dtype=np.bool)*True\n bnd_msk[self.s2_bnds] = False\n brf_data[:,bnd_msk] = self.obs_fill_value\n brf_dataunc[:,bnd_msk] = self.obs_fill_value\n #-- set into structure\n self.obs_dct['S2'].data = brf_data\n self.obs_dct['S2'].dataunc = brf_dataunc\n #-- logging\n msg = \"...reading S2 observations DONE\"\n FileLogger.info(msg)", "def get_data(self, csv_file):\n pass", "def load_data(path):\n\n columns = ['Item Year', 'Original Value', 'Standard Value', 'Original Currency',\n 'Standard Currency', 'Orignal Measure', 'Standard Measure', 'Location',\n 'Commodity']\n col_type = [int, float, float, object, object, object, object, object]\n\n col_type_dict = dict(zip(columns, col_type))\n\n au_df = pd.read_csv(path, usecols=columns)\n au_df = au_df.astype(col_type_dict)\n au_df.name = 'AU_data'\n \n return au_df, columns", "def load(self):\n\t\t# save self.dfAnalysis\n\t\tcsvPath = self._getSavePath()\n\t\tif os.path.isfile(csvPath):\n\t\t\tprint('todo: load from', csvPath)\n\t\t\tself.dfAnalysis = pd.read_csv(csvPath, header=0)\n\t\t\tself.updateAnalysisPlot()\n\t\telse:\n\t\t\tprint('did not find saved file csvPath:', csvPath)", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_records():\n\n with open('seed_data/records.csv', 'rb') as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n record_id, user_id, common_name, date_time, latitude, longitude, notes, seen, num_birds = row\n\n record = Record(record_id=record_id, user_id=user_id, common_name=common_name,\n date_time=date_time, latitude=latitude, longitude=longitude, \n notes=notes, seen=seen, num_birds=num_birds)\n\n db.session.add(record)\n\n db.session.commit()", "def load_csv(filename, dialect='excel', encoding='utf-8'):\n return Context.fromfile(filename, 'csv', encoding, dialect=dialect)", "def load_csv(fname = data_indoor):\n \n reader = csv.reader(open(fname, 'r'))\n \n # Blank list\n data = []\n \n # Don't read the zeroth element of 
each row (image name), convert to float.\n for row in reader:\n data.append(map(float, row[1:]))\n \n # Convert list to array \n d = np.array(data)\n \n # Seperate labels from features\n Y = d[:,0]\n X = d[:,1:]\n \n return X,Y", "def _load_csv(self, file_path):\n csv_ds = tf.data.experimental.CsvDataset(\n file_path, self._default_csv_values,\n header=True,\n field_delim=CsvFilesDataset.CSV_SEPARATOR,\n use_quote_delim=False,\n select_cols=self._feature_column_indices\n )\n\n # Map to dictionary with column names\n if self.debug_columns:\n csv_ds = csv_ds.enumerate()\n csv_ds = csv_ds.map(lambda *row: self._map_csv_row_to_dict_with_debug(file_path, row))\n else:\n csv_ds = csv_ds.map(\n lambda *row: { feature_column_name: csv_column_values for feature_column_name, csv_column_values in zip(self._feature_column_names, row) }\n )\n\n # Get CSV file sequences\n csv_ds = self._map_csv_file_to_sequences(csv_ds, file_path)\n\n # Remove train column (avoid keras warning about unused inputs)\n if self._data_definition.trainable_column:\n csv_ds = csv_ds.map(self.remove_trainable_column)\n\n return csv_ds", "def __load_csv_into_mem(label, exp, obj, norms):\n filename = obj.get('file')\n # def csv_loader\n label_pos = obj.get('label', 'first')\n if label_pos == 'first':\n label_first = True\n else:\n label_first = False\n\n labels = []\n\n def get_element_from_csv():\n def callback(dim, lbl):\n CSVDataset.__load_csv_into_mem.dimension = dim - 1\n for l in lbl:\n labels.append(l)\n\n with open(filename, 'r') as f:\n for i in CSVDataset.__get_element_from_file__(csv.reader(f), label_first, norms, callback):\n yield i\n\n input_data = np.fromiter(get_element_from_csv(), dtype=np.float32)\n dimension = CSVDataset.__load_csv_into_mem.dimension\n input_data = input_data.reshape((-1, dimension))\n # print input_data[0]\n labels = np.asarray(labels, 'int32')\n kwargs = {}\n if 'batches' not in kwargs:\n b = getattr(exp.args, '%s_batches' % label, None)\n kwargs['batches'] = b\n if 'size' not in kwargs:\n kwargs['size'] = exp.args.batch_size\n kwargs['label'] = label\n return SequenceDataset(*(input_data, labels), **kwargs)", "def load_dataset(path):\n training_data = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' 
% self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)", "def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data", "def load_data(csv_filename):\n data = np.genfromtxt(csv_filename, delimiter=\";\", skip_header=1, usecols=range(11))\n return data", "def load_data(file_path):\n data = pandas.read_csv(file_path)\n\n return data", "def CSV_Load_File( self, infilename ):\n print( 'Loading \"{}\"'.format(infilename) )\n IN = open( infilename, 'r' )\n standname = None\n laststand = None\n for L in IN:\n if( L[0:9] == 'Site/Plot' ): continue\n col = L.split( ',' )\n standname = col[0]\n year = int(col[1])\n #if( re.search( '-', standname ) != None ):\n # loc = re.search( '-', standname )\n # year = int(standname[loc.start()+1:])\n # standname = standname[0:loc.start()]\n #print standname, year\n if( (standname != None ) & (standname != laststand) ): self.Data.Stand[standname] = StandData( standname )\n (treeno, species, dbh, ht, live, status, cclass, tpa) = \\\n (int(col[2]), col[3], float(col[4]), float(col[5]), col[6], col[7], int(float(col[8])), float(col[9]))\n if( OPT['d'] ):\n if( dbh > 10.0 ): dbh *= 1.25\n if( dbh > 15.0 ): dbh *= 1.50\n for t in range( 1, int( math.ceil( tpa ))+1, 1 ):\n ntree = len( self.Data.Stand[standname].Tree ) + 1\n self.Data.Stand[standname].Tree[ntree] = TreeData( species, TreeNumber=treeno )\n self.Data.Stand[standname].Tree[ntree].Year[year] = MeasurementData( dbh, ht, '', 1, live, status, cclass )\n laststand = standname\n IN.close()", "def load_data(self, filepath, sep=\",\"):\n if filepath.split('.')[-1] == 'csv':\n self.data = pd.read_csv(filepath, sep=sep)\n elif filepath.split('.')[-1] == 'json':\n self.data = pd.read_json(filepath)\n else:\n print 'Please select a csv or json file'", "def _load_data(filename):\n\n def str2date(s):\n \"\"\"Converts a string to a datetime\"\"\"\n return datetime.strptime(s.decode(), \"%Y-%m-%d %H:%M:%S\")\n\n # Load the data\n return np.recfromcsv(filename, converters={0: str2date}, comments=\"#\")", "def load_csv_model(filename) -> tuple:\n dat_sci = pd.read_csv(resources_folder(filename), index_col=0)\n commenter('data from ' + filename, lambda: print(dat_sci))\n\n ind = dat_sci.index\n # commenter('index', lambda: print(ind))\n col = dat_sci.columns\n # commenter('columns', lambda: print(col))\n # self.data = np.asmatrix(dat_sci.values)\n # commenter('data', lambda: print(self.data))\n # print(type(dat_sci))\n\n return dat_sci, ind, col", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n 
entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def loadCSVFile(self):\n file_name = \"files/parts.csv\"\n\n with open(file_name, \"r\") as csv_f:\n reader = csv.reader(csv_f)\n header_labels = next(reader)\n self.model.setHorizontalHeaderLabels(header_labels)\n for i, row in enumerate(csv.reader(csv_f)):\n items = [QStandardItem(item) for item in row]\n self.model.insertRow(i, items)", "def _load(self, config: Dict):\n return pd.read_csv(config['path'])", "def load_file(self):\n\n self.df = self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)", "def loadData(catalog, accidentsfile):\n accidentsfile = cf.data_dir + accidentsfile\n input_file = csv.DictReader(open(accidentsfile, encoding=\"utf-8\"),\n delimiter=\",\") \n for accident in input_file:\n model.addAccident(catalog,accident)", "def load_csv(file_path, access_mode = \"r\"):\n with open(file_path, access_mode) as f:\n return list(csv.reader(f))", "def _load_csv_data(kingdom_csv_path: str):\n\n file_path = os.getcwd() + \"/\" + RESOURCES_DIR_PATH + \"/\" + kingdom_csv_path\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def load_from_table(filename: str) -> List[Sample]:\n with open(filename, \"r+\", newline=\"\") as f:\n reader = csv.reader(f)\n return samples_from_iterator(reader)", "def load_dataset_csv(filename, path=DATASETS):\n fpath = qualify_full_filepath(f\"{filename}.csv\", path)\n with open(fpath, \"r\", newline=\"\\n\") as infile:\n data = [row for row in csv.DictReader(infile)]\n return data", "def loadData(path_file):\n data = pd.read_csv(path_file) \n data.head()\n return data", "def load_obs_data(obsfile, domain):\n\n if not os.path.isfile(obsfile):\n LOGGER.error(f\"{obsfile} does not exist\")\n sys.exit()\n\n LOGGER.info(f\"Loading observed TC tracks from {obsfile}\")\n best = pd.read_csv(obsfile, skiprows=[1],\n usecols=[0, 6, 8, 9, 11, 95, 113],\n na_values=[' '],\n parse_dates=[1])\n best.rename(columns={'SID': 'num', 'LAT': 'lat', 'LON': 'lon',\n 'WMO_PRES': 'pmin', 'BOM_WIND': 'vmax',\n 'BOM_POCI': 'poci'}, inplace=True)\n best = best[best.poci.notnull() & best.pmin.notnull()]\n best['pdiff'] = best.poci - best.pmin\n best = best[best.pdiff > 1]\n obstc = filter_tracks_domain(best, *domain)\n return obstc", "def ReadFromCSV(filename):\n\n class LocationTable(csvtable.CSVTable):\n\n def __init__(self, name, headings):\n super(LocationTable, self).__init__(name, headings)\n self.data = []\n\n def AddRow(self, row):\n self.CheckRow(row)\n self.data.append(Interval(*row))\n\n data = csvtable.ReadMultitableCSV(filename, LocationTable)\n expected_tables = [\"BUSINESSTRIPS\", \"RESIDENCE\"]\n\n if sorted(data.keys()) != expected_tables:\n raise ValueError(\"Unexpected tables.\\n Expected: %s\\n Found: %s\" % (\n sorted(data.keys()), expected_tables))\n\n return TaxCalendar(data[\"RESIDENCE\"].data, data[\"BUSINESSTRIPS\"].data)", "def importData(filename):\r\n data = pd.read_csv(filename)\r\n return data", "def load_from_csv(path, delimiter=','):\n return pd.read_csv(path,encoding = \"ISO-8859-1\",dtype=object)", "def _load(self, filename):\n with open(filename) as fp:\n reader = csv.DictReader(fp)\n self.events = 
list(reader)", "def load_info(self, file_path):\r\n info = pd.read_csv(file_path, header=0, index_col=0)\r\n self.set_info(info)\r\n logger.info(f'{self} load info')", "def load(self):\n data = pandas.read_csv(self.path, names=self.names)\n return data", "def load() -> DataFrame:\n return load_file(__file__, \"default.csv.gz\")", "def load_symbol_universe_data_from_csv(self, csv_fullpath):\n\n print(\"[{}] [INFO] Loading symbol universe data from csv...\".format(datetime.now().isoformat()))\n\n df = pd.read_csv(csv_fullpath)\n\n #--------------------------------------------------------------------------\n # Convert date column to type numpy datetime64.\n #--------------------------------------------------------------------------\n df.date = pd.to_datetime(df.date)\n\n return df", "def __loaddata(filename, datatype='flightcsv', minprob=0.001, maxprob=0.20):\n if datatype is 'flightcsv':\n return extract_flight_csv(filename, minprob=minprob, maxprob=maxprob)\n else:\n raise Exception('unknown datatype %s' % datatype)", "def load_data(self, filename: str) -> None:\n if os.path.exists(filename):\n self.df = pd.read_csv(filename)\n logging.info(f\"Data loaded from {filename}\")\n else:\n raise FileNotFoundError(f\"{filename} does not exist.\")", "def __load(self, file_path):\n self.series = read_csv(file_path, header=0)\n self.series = self.__extract_series_per_country(self.countries).transpose().iloc[1:]\n self.series.index.names = ['Date']\n self.series.index = pd.to_datetime(self.series.index)\n self.series.columns = self.countries", "def initialize_from_file(filename):\r\n df = pd.read_csv(filename)\r\n return df", "def test_load_with_csv(self):\n\n corpus = Corpus(\n common.TEST_CORPUS_PATH,\n csv_path=common.LARGE_TEST_CORPUS_CSV,\n name='test_corpus',\n )\n assert len(corpus) == 99\n assert isinstance(corpus.documents, list)\n assert corpus.name == 'test_corpus'", "def import_csv(self, csvfileobject):\n # Clear previously stored info\n self._tracks = []\n self._selected = None\n\n for row in csvfileobject:\n if row[0] == \"T\":\n track = self.addTrack()\n track.properties = row\n elif row[0] == \"P\":\n period = self.addPeriod([0,1,'-'])\n period.properties = row", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def load_csvFile(file_location, file_name,sep,encoding):\n try:\n fullpath=file_location+file_name\n df = pd.read_csv(fullpath, encoding=encoding,sep=sep)\n return df\n except IOError:\n print('Error loading the file: ' , file_name)\n sys.exit(1)", "def csv_data_loader(path):\n\n logging.info(\"Loading file using SparkSession\")\n csvload = Spark.instance.spark() \\\n .read \\\n .format(\"csv\") \\\n .options(header=True) \\\n .options(mode=\"DROPMALFORMED\")\n\n return csvload.option(\"inferSchema\", \"true\").load(path)", "def load_data(path_to_data, outcome):\n df = pd.read_csv(path_to_data)\n\n mean_temp = df['meanTempDegree'].values\n daily_temp = df['dailyTempCat'].values\n obs_mean = df['lnRr_' + outcome].values\n obs_std = df['se_' + outcome].values\n study_id = df['adm1'].values\n data_id = np.arange(df.shape[0])\n\n valid_id = ~(np.isnan(obs_std) |\n np.isnan(obs_mean) |\n np.isinf(obs_std) |\n np.isinf(obs_mean))\n\n mean_temp = mean_temp[valid_id]\n daily_temp = daily_temp[valid_id]\n obs_mean = obs_mean[valid_id]\n obs_std = obs_std[valid_id]\n study_id = study_id[valid_id]\n data_id = data_id[valid_id]\n\n return utils.TempData(mean_temp,\n daily_temp,\n obs_mean,\n obs_std,\n study_id,\n data_id)", "def read_csv(self, 
filepath, header=True):\n BaseSampler.read_csv(self, filepath, header)\n # convert the data to floats\n self.new_obs = []\n self.img_w, self.img_h = None, None\n for row in self.obs:\n if self.img_w is None:\n self.img_w = int(row[0])\n if self.img_w == 0 or (len(row)-1) % self.img_w != 0:\n raise Exception('The sampler does not understand the format of the data. Did you forget to specify image width in the data file?')\n self.new_obs.append([int(_) for _ in row])\n\n self.obs = np.array(self.new_obs)[:,1:]\n if self.cl_mode:\n self.d_obs = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf=self.obs.astype(np.int32))\n\n self.d = self.obs.shape[1]\n self.img_h = int(self.d / self.img_w)\n self.alpha = float(self.N) * 5\n return", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_data(filepath):\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n yield row", "def read_data(self):\n print 'Reading Data ...'\n fname = self.wpath + 'Data/' + self.city[2] + '-' + self.application + '.csv.bz2'\n self.dataset = loadtxt(fname, skiprows=1,\n dtype=[('lat', 'f8'), ('lng', 'f8'), ('time', 'i4'), ('user', 'S20')],\n usecols=(0, 1, 2, 3), delimiter=';', comments='#')", "def load(csvfile):\n return PsychoPyCSV(csvfile)", "def read(self):\r\n\r\n self.data = []\r\n\r\n with open(self.filename + \".csv\", mode='r') as csv_file:\r\n reader = csv.DictReader(csv_file)\r\n for row in reader:\r\n self.data.append(row)", "def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df", "def load_data(path):\n train = pd.read_csv(os.path.join(path,'train.csv'))\n test = pd.read_csv(os.path.join(path,'test.csv'))\n \n return train, test", "def load_data_csv(metric='rank'):\n if metric == 'rank':\n path = base_data_path + 'song_data_rank.csv'\n elif metric == 'count':\n path = base_data_path + 'song_data_count.csv'\n else:\n return \"Please choose 'rank' or 'count' for metric.\"\n\n songs_df = pd.read_csv(path, sep=',') #, dtype={'page': str, 'ranks': np.array})\n\n literal_eval(songs_df['ranks'][0])\n # Reading in the CSV pulls the ranks in as a string, so fix that to a list.\n for rs in range(len(songs_df['ranks'])):\n songs_df['ranks'][rs] = literal_eval(songs_df['ranks'][rs])\n return songs_df", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. 
This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def load_file(name):\n return pd.read_csv(join(path_here, \"syserol/data/\" + name + \".csv\"), delimiter=\",\", comment=\"#\")", "def load_csv(fichero):\r\n data = np.loadtxt(fichero, delimiter=',')\r\n X = data[:,:-1]\r\n y = data[:,-1]\r\n return X, y", "def load_utlization(path):\n df = pd.read_csv(f\"{raw_data}\\\\{path}\", parse_dates=[\"AdmissionDate\"])\n\n df.rename(\n columns={\"MemberID\": \"member_id\", \"LOSDays\": \"los\", \"FacilityName\": \"facility\"},\n inplace=True,\n )\n\n df.columns = clean_table_columns(df.columns)\n\n facility_col = [col for col in df.columns if \"facility\" in col][0]\n\n df = cognify_facility_changes(df, facility_col)\n\n df = df[df.member_id != 1003]\n return df", "def __load_csv(filename):\n fp = open(Parser.DATA_FOLDER_PATH + filename + '.csv', 'r')\n records = []\n for line in fp:\n items = line.strip().split(',')\n x, y, z = '0', '0', '0'\n if len(items) > 1:\n x = items[1]\n if len(items) > 2:\n y = items[2]\n if len(items) > 3:\n z = items[3]\n\n values = [x, y, z]\n records.append(values)\n\n # Discard some beginning data which may be noisy\n # del records[:int(len(records) / 30)]\n n = len(records)\n\n for i in range(n):\n rec = []\n # Consider X, Y, Z axes\n for k in range(3):\n # If can convert string to float\n try:\n val = float(records[i][k])\n except ValueError:\n val = 0\n rec.append(val)\n\n # Replace it\n records[i] = rec\n return records", "def load_database(self, fsp='Species'):\n self.df_species = pd.read_csv(fsp + '.csv', header=0,\n index_col=0)", "def _read_csv(self):\n with open(self._file_path, 'rb') as f:\n reader = csv.DictReader(f, delimiter=',')\n self._content = [row for row in reader]", "def load_train_data():\n\n # Load X_train\n with open('X_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TRAIN_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_train = np.array(feature_string_matrix)\n # Load Y_train\n with open('y_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n y_string = []\n for row in reader:\n y_value = [float(row['y'])]\n y_string.append(y_value)\n y_train = np.array(y_string)\n return X_train, y_train", "def load_csv(path: Path) -> Any:\n with open(path, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n items = list(reader)\n return items", "def load_data(path):\n try:\n data = pd.read_csv(path, sep='\\t')\n except FileNotFoundError:\n logger.exception(\"Traceback of data file '{}' not found.\".format(path))\n else:\n return data", "def load_data(filepath):\n\n file_path_casted = Path(filepath)\n if not file_path_casted.exists():\n raise FileNotFoundError(\"File does not exist.\")\n\n data = pd.read_csv(filepath, delimiter=\",\")\n\n return data", "def 
load_data(filename):\n # create an evidence and label list\n evidence = []\n label = []\n\n # create a dictionary to hold key months matching to their respective values\n month = {'Jan': 0, 'Feb': 1, 'Mar': 2, 'Apr': 3, 'May': 4, 'June': 5, 'Jul': 6, 'Aug': 7, 'Sep': 8, 'Oct': 9,\n 'Nov': 10, 'Dec': 11}\n\n # open and read the csv file\n with open(filename) as data:\n # use the dictionary csv reader to be able to call the cell values by the csv column header names\n reader = csv.DictReader(data)\n # read each row in the csv and append the evidence and labels to their respective lists\n for row in reader:\n evidence.append([\n int(row[\"Administrative\"]),\n float(row[\"Administrative_Duration\"]),\n int(row[\"Informational\"]),\n float(row[\"Informational_Duration\"]),\n int(row[\"ProductRelated\"]),\n float(row[\"ProductRelated_Duration\"]),\n float(row[\"BounceRates\"]),\n float(row[\"ExitRates\"]),\n float(row[\"PageValues\"]),\n float(row[\"SpecialDay\"]),\n month[row[\"Month\"]],\n int(row[\"OperatingSystems\"]),\n int(row[\"Browser\"]),\n int(row[\"Region\"]),\n int(row[\"TrafficType\"]),\n 1 if row[\"VisitorType\"] == \"Returning_Visitor\" else 0,\n 1 if row[\"Weekend\"] == \"TRUE\" else 0,\n ])\n label.append(\n 1 if row['Revenue'] == 'TRUE' else 0\n )\n\n return evidence, label", "def load_data(filename):\n with open(\"./shopping.csv\", \"r\") as f:\n reader = csv.reader(f)\n next(reader)\n evidence_raw = []\n labels_raw = []\n for row in reader:\n evidence_raw.append(row[:-1])\n labels_raw.append(row[-1])\n evidence = []\n labels = []\n for row1, row2 in zip(evidence_raw, labels_raw):\n evidence.append(oneHotEncode_Evi(row1))\n labels.append(oneHotEncode_labels(row2))\n return (evidence, labels)", "def mock_data_loader(csv_path):\n file_path = KINGDOM_CSV_PATH\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def ouvrir_fichier():\r\n df = pandas.read_csv(\r\n 'ong.csv',\r\n header=2,\r\n names=[\r\n 'id',\r\n 'country',\r\n 'year',\r\n 'emissions',\r\n 'value',\r\n 'footnotes',\r\n 'source'\r\n ]\r\n )\r\n if df is None:\r\n return abort(404)\r\n else:\r\n return df", "def load_data(self):\n\n data_pd = pd.read_csv(self.filename)\n return np.array(data_pd)", "def load_data(filename):\n \n labels = []\n evidence = []\n\n monthdict = {\n \"Jan\": 0, \"Feb\": 1, \"Mar\": 2, \"Apr\": 3, \"May\": 4, \"June\": 5, \"Jul\": 6,\n \"Aug\": 7, \"Sep\": 8, \"Oct\": 9, \"Nov\": 10, \"Dec\": 11\n }\n\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n\n for row in reader:\n evidence.append(\n [int(row[0]), float(row[1]), int(row[2]), float(row[3]), int(row[4]), float(row[5])] +\n [float(e) for e in row[6:9]] + [monthdict[row[10]]] +\n [int(e) for e in row[11:14]] + [0 if row[15] == \"New_Visitor\" else 1] +\n [1 if row[16] == \"TRUE\" else 0]\n )\n\n labels.append(0 if row[17] == \"FALSE\" else 1)\n \n return (evidence, labels)", "def load_from_file_csv(cls):\n list_obj = []\n if os.path.exists(cls.__name__ + \".csv\"):\n with open(cls.__name__ + \".csv\", \"r\") as _file:\n str_csv = _file.read()\n _file.close()\n _dict = Base.from_json_string(str_csv)\n for obj in _dict:\n list_obj.append(cls.create(**obj))\n return(list_obj)", "def load_from_file_csv(cls):\n try:\n with open(cls.__name__ + \".csv\", \"r\") as f:\n ld = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, val in row.items():\n 
row[key] = int(val)\n ld.append(row)\n return [cls.create(**item) for item in ld]\n except FileNotFoundError:\n return []", "def from_csv(self, filename):\n\t\tpoints = np.genfromtxt(filename, delimiter=\",\")\n\t\tassert points.shape[1] == 2\n\n\t\tself.N = points.shape[0]\n\t\tself.points = points\n\t\tself.original_points = points", "def load_data(file_name):\n return Orange.data.Table(file_name)", "def load_metrics(fp):\r\n with open(fp) as csvfile:\r\n read = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\r\n lst = []\r\n for i in read:\r\n new_row = i[0:2] + i[7:-1]\r\n lst.append(new_row)\r\n data = np.array(lst)\r\n return data", "def restore_profile_from_csv(csv_file):\n return np.loadtxt(csv_file, delimiter=\",\", skiprows=1, usecols=range(1, 21))", "def load_data(filename):\n #Admittedly copy-pasted from Heredity project cuz I'm resourceful like that\n #Makes 2 lists, one for evidence and one for labels\n evidence = []\n labels = []\n #Open csv file\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n #Iterate through user rows of file\n for row in reader:\n i = 0\n tmp_list = []\n for column in row:\n if i in [0,2,4,11,12,13,14]:\n column = int(column)\n if i in [1,3,5,6,7,8,9]:\n column = float(column)\n if i == 10:\n if column == \"Jan\":\n column = 0\n if column == \"Feb\":\n column = 1\n if column == \"Mar\":\n column = 2\n if column == \"Apr\":\n column = 3\n if column == \"May\":\n column = 4\n if column == \"June\":\n column = 5\n if column == \"Jul\":\n column = 6\n if column == \"Aug\":\n column = 7\n if column == \"Sep\":\n column = 8\n if column == \"Oct\":\n column = 9\n if column == \"Nov\":\n column = 10\n if column == \"Dec\":\n column = 11\n if i in [15,16]:\n if column == \"Returning_Visitor\" or column == \"TRUE\":\n column = 1\n else:\n column = 0\n if i == 17:\n if column == \"TRUE\":\n column = 1\n else:\n column = 0\n labels.append(column)\n else:\n tmp_list.append(column)\n i+=1\n evidence.append(tmp_list)\n \n return (evidence,labels)", "def load_data(filename='KSI.csv'):\r\n d = []\r\n with open(filename) as csv_file:\r\n # csv_reader = csv.reader(csv_file, delimiter=',')\r\n csv_reader = csv.DictReader(csv_file, delimiter=',')\r\n for line_count, row in enumerate(csv_reader):\r\n if line_count == 0:\r\n print(f'Column names are \\n{\", \".join(row)}')\r\n # column_names = row\r\n else:\r\n d.append(row)\r\n # print(f'Processed {line_count} lines.')\r\n return d", "def get_data(filename):\r\n return pd.read_csv(filename)", "def load_catalog(self):\n self.catalog = pd.read_csv(self.catalog_path, \n index_col=0, parse_dates=True)\n self.unique_years = self.catalog.index.year.unique()\n return", "def load(values):\n import sqlite3\n conn = sqlite3.connect('./example.db')\n df = pd.DataFrame(values)\n df.to_sql('observations', conn)", "def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data" ]
[ "0.72104836", "0.68858993", "0.66881233", "0.6687184", "0.66345334", "0.6607736", "0.65853924", "0.6583807", "0.6527228", "0.65123886", "0.6504362", "0.6419528", "0.6413457", "0.6371477", "0.6369214", "0.63646847", "0.62995994", "0.6261016", "0.62593544", "0.62590593", "0.62523556", "0.6240575", "0.62338704", "0.62226063", "0.62195426", "0.62147194", "0.6200507", "0.61906916", "0.61905724", "0.6188359", "0.6188032", "0.6182967", "0.61828566", "0.61774313", "0.6166883", "0.6156295", "0.61523306", "0.6146016", "0.6143638", "0.61309123", "0.6126934", "0.61253387", "0.6124282", "0.6119955", "0.6109157", "0.6095939", "0.60952556", "0.6087488", "0.6081012", "0.607889", "0.6072962", "0.6062332", "0.60581595", "0.6057235", "0.6046918", "0.6027509", "0.60182714", "0.6016784", "0.6015601", "0.60124564", "0.59988457", "0.59946656", "0.5988563", "0.5982989", "0.59776586", "0.59757316", "0.5966044", "0.5965351", "0.59557515", "0.5955456", "0.59484994", "0.5936783", "0.5929242", "0.59229076", "0.5902975", "0.5897949", "0.58970696", "0.5896497", "0.58926684", "0.58914864", "0.58914", "0.589137", "0.58798885", "0.58790416", "0.587728", "0.58648026", "0.58620024", "0.5861629", "0.5860573", "0.585523", "0.58471966", "0.58467937", "0.5840501", "0.58356076", "0.58316684", "0.5831032", "0.58273584", "0.5821704", "0.58178765", "0.5813934", "0.5813431" ]
0.0
-1
Program entry point. Loads a CSV file of observations. Determines how items were exchanged during various rendezvous. Prints the exchanges as they happen, if desired. Prints the latest owner of a specific item, if desired. Otherwise neatly prints a dictionary mapping suspects to the item they currently own. This program will return an exit code of `1` in one of two
def main(args): # Tuple of carried items and timeline time_tuple = load_timeline(args.observations) # For each Observation in list, calculated final held item for suspectPair in time_tuple[1].rendezvous(): # If user wanted exchanges, print each exchange if args.exchanges: print(suspectPair[0].name + " meets with " + suspectPair[1].name + " to exchange " + time_tuple[0][suspectPair[0].name] + " for " + time_tuple[0][suspectPair[1].name] + ".") # Trades items temp_item = time_tuple[0][suspectPair[0].name] time_tuple[0][suspectPair[0].name] = time_tuple[0][suspectPair[1].name] time_tuple[0][suspectPair[1].name] = temp_item # If no items specified or exchanges is true, # print list of final help items if (args.item == '') or (args.exchanges): pprint.pprint(time_tuple[0], indent=4) # If user specified an item, print who has said item if not args.item == '': for name, i in time_tuple[0].items(): if i == args.item: print(name + " had the " + i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n options = [\"Add\", \"Remove\", \"Update\", \"Oldest person\", \"Persons closest to average\"]\n common_options = [\"Name: \", \"Year: \"]\n file = \"model/hr/persons.csv\"\n title_list = [\"Id\", \"Name\", \"Year\"]\n choice = None\n dont_clear = False\n while choice != '0':\n if not dont_clear:\n os.system(\"clear\")\n table = data_manager.get_table_from_file(file)\n terminal_view.print_table(table, title_list)\n choice = terminal_view.get_choice_submenu(options)\n dont_clear = False\n if choice == '1':\n common.add(file, common_options)\n elif choice == '2':\n common.remove(file)\n elif choice == '3':\n common.update(file, common_options)\n elif choice == '4':\n terminal_view.print_result(hr.get_oldest_person(table), \"Oldest persons:\\n\")\n dont_clear = True\n elif choice == '5':\n msg = \"Persons with age closest to average:\\n\"\n terminal_view.print_result(hr.get_persons_closest_to_average(table), msg)\n dont_clear = True\n else:\n terminal_view.print_error_message(\"There is no such choice.\")", "def run():\n table = hr.get_hr_table_from_file()\n title_list = [\"ID\", \"Name\", \"BirthYear\"]\n options = [\"View records\",\n \"Add record\",\n \"Remove record\",\n \"Update record\",\n \"Which person is the oldest?\",\n \"Which person is the closet to average age?\"]\n\n\n choice = None\n while choice != \"0\":\n choice = terminal_view.get_choice_inner_menu(options, \"HR manager\")\n if choice == \"1\":\n terminal_view.print_table(table, title_list)\n elif choice == \"2\":\n record = terminal_view.get_inputs(title_list[1::],\"Please provide new item data\")\n table = hr.add(table, record)\n elif choice == \"3\":\n id_to_delete_table = terminal_view.get_inputs([\"ID\"],\"Item to delete\")\n id_to_delete = id_to_delete_table[0]\n table = hr.remove(table, id_to_delete)\n elif choice == \"4\":\n records = terminal_view.get_inputs(title_list,\"Edit item\")\n record_id = records[0]\n table = hr.update(table, record_id, records)\n elif choice == \"5\":\n oldest_person = hr.get_oldest_person(table)\n terminal_view.print_result(oldest_person, \"The oldest person: \")\n elif choice == \"6\":\n closest_to_average = hr.get_persons_closest_to_average(table)\n terminal_view.print_result(closest_to_average,\"The closest to average is: \")\n elif choice != \"0\":\n terminal_view.print_error_message(\"There is no such choice.\")", "def main():\n\n # open links.csv in order to access IMDB id numbers\n ifile = open('movie-countries.csv', \"rb\")\n reader = csv.reader(ifile)\n \n # writer for csv with countries\n ofile = open('country_stats.csv', \"wb\")\n writer = csv.writer(ofile)\n\n # deal with headers\n reader.next() # skip first line\n writer.writerow(['country', 'number of movies', 'number of primary movies'])\n\n # one dictionary for all mention of a country, one dictionary for if the country was the first one listed\n country_count_dict = {}\n country_count_primary_dict= {}\n\n # iterate through data\n for row in reader:\n # get the countries column\n countries = row[3]\n\n # add to dicionary of countries\n for country in countries.split(\"|\"):\n country_count_dict[country] = country_count_dict.get(country, 0) + 1\n\n # if it's the primary country\n if country == countries.split(\"|\")[0]:\n country_count_primary_dict[country] = country_count_primary_dict.get(country, 0) + 1\n\n # write to the file\n for key, value in country_count_dict.iteritems():\n writer.writerow([key , str(value), country_count_primary_dict.get(key, \"0\")])\n\n ifile.close()\n ofile.close()", "def 
main():\n\n try:\n people = Parser.read_file(sys.argv[1])\n print(\"\\nResult:\")\n for email, person in people.items():\n print(\"{}: {}\".format(email, person))\n except RuntimeError as error:\n print(error)\n exit(1)", "def main():\n\n # Refer to Problem Set 07 README.md for instructions and tips.\n\n # 6.1: Read in < sh_basic_info.csv >\n\n basic_info = read_csv_file('sh_basic_info.csv')\n\n # 6.2: Create instances of < SuperHeroine >\n\n heroines = {}\n for hero in basic_info:\n heroines[hero['name']] = SuperHeroine(hero['name'], hero['full_name'], hero['team'],\n hero['eye_color'], hero['hair_color'], hero['base'])\n print(heroines)\n\n # 6.3: Read in < sh_additional_info.csv >\n\n additional_info = read_csv_file('sh_additional_info.csv')\n\n # 6.4: Add powers and nemesis\n\n for row in additional_info:\n name = row[\"Heroine Name\"]\n instance_affected = heroines[name]\n how_affected = row[\"Category\"]\n value = row['Value']\n if how_affected == 'power':\n instance_affected.add_power(value)\n else:\n instance_affected.add_nemesis(value)\n\n # 6.5: Write to file\n\n write_to_file('storm.txt',heroines['Storm'])\n write_to_file('scarlet_witch.txt',heroines['Scarlet Witch'])\n write_to_file('jessica_jones.txt',heroines['Jessica Jones'])", "def main():\n\n csv_file = \"shortlist.csv\"\n team_count = 0\n participant_count = 0\n\n\n #Delete all existing teams and participants from the database.\n Team.objects.all().delete()\n Participant.objects.all().delete()\n\n with open(csv_file) as f:\n reader = csv.reader(f)\n data = [row for row in reader]\n\n for item in data:\n if item[0]:\n team_count += 1\n\n t = Team.objects.create(\n name=item[0].strip(),\n idea=item[30].strip()\n )\n\n no_of_p = int(item[1])\n print item[1]\n participant_count += no_of_p\n\n p1 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[2].strip() + \" \" + item[3].strip(),\n gender=item[4].strip(),\n college=item[7].strip(),\n email=item[5].strip(),\n phone=str(item[6]),\n team=t\n )\n\n p2 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[11].strip() + \" \" +item[12].strip(),\n gender=item[13].strip(),\n college=item[16].strip(),\n email=item[14].strip(),\n phone=str(item[15]),\n team=t\n )\n\n if no_of_p == 3:\n p3 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[20].strip() + \" \" +item[21].strip(),\n college=item[25].strip(),\n gender=item[22].strip(),\n email=item[23].strip(),\n phone=str(item[24]),\n team=t\n )\n\n print \"{} teams and {} participants imported.\".format(team_count,\n participant_count)", "def main():\n\n # Read the CSV and get its content\n jobOfferList, professionsList = usefulFunctions.readCsv()\n \n # Create an empty output tab with the right number of lines and columns\n finalTab = usefulFunctions.createEmpty(jobOfferList, professionsList)\n \n # Fill the tab\n finalTab = usefulFunctions.fillTabExceptTotals(jobOfferList, professionsList, finalTab)\n \n # Update the totals \n finalTab = usefulFunctions.fillTotals(finalTab)\n \n print(\"\\nTable des métiers par profession et type de contrat : \")\n for line in finalTab:\n print(line)", "def main():\n\n # Ask for games to compare.\n games = {}\n more_games = True\n\n while more_games:\n search = input(\"Enter board game to search (leave empty if finished):\")\n\n if search:\n matches = bgg_compare.find_game(search)\n\n print(\"Games found:\")\n for game_id, name in matches.items():\n print(game_id + \"\\t\" + name)\n id = input(\"Enter the number before the intended game:\")\n 
games[id] = matches[id]\n\n else:\n more_games = False\n\n # If no games entered, compare all downloaded ratings.\n if not games:\n ids = []\n for f in glob.glob(\"[0-9]*.csv\"):\n id = os.path.splitext(f)[0]\n ids.append(id)\n game_info = bgg_compare.get_game_info(ids)\n for i, info in enumerate(game_info):\n name = info.find(\"name\", attrs={\"type\": \"primary\"})[\"value\"]\n games[ids[i]] = name\n\n print(\"Comparing games:\")\n\n all_ratings = []\n\n for game_id, name in games.items():\n\n print(name)\n\n ratings = {}\n filename = \"%s.csv\" % game_id\n\n try:\n ratings = bgg_compare.read_ratings(filename)\n except:\n ratings = bgg_compare.get_ratings(game_id)\n bgg_compare.write_ratings(ratings, filename)\n\n all_ratings.append(ratings)\n\n rankings = bgg_compare.condorcet_irv(all_ratings, list(games.keys()))\n\n print(\"Games ranked by Condorcet-IRV:\")\n\n header = [\"Rank\", \"ID\", \"Game\", \"Tiebreak\"]\n print(\"\\t\".join(header))\n\n for i, (game_id, tiebreak) in enumerate(rankings, 1):\n print(\"\\t\".join([str(i), game_id, games[game_id], str(tiebreak)]))\n\n outfile = input(\"Enter filename to save results (leave empty to not save)\")\n\n if outfile:\n with open(outfile, \"w\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(header)\n for i, (game_id, tiebreak) in enumerate(rankings, 1):\n writer.writerow([str(i), game_id, games[game_id], str(tiebreak)])", "def main():\n # P R O B L E M 2\n a_random_pokemon = create_entry(81, \"Magnemite\", \"Electric\", \"Steel\", 25, 35, 70, 95, 55, 45, 1, False)\n\n for key in a_random_pokemon.keys():\n print(\"{}: {}\".format(key, a_random_pokemon[key]))\n\n print(a_random_pokemon[\"Battle Stats\"])\n print(a_random_pokemon[\"Battle Stats\"][\"HP\"])\n print(a_random_pokemon[\"Battle Stats\"][\"Attack\"])\n\n print() # For formatting\n # P R O B L E M 3\n filepath = \"pokemon.csv\"\n pokedex = create_pokedex(filepath)\n pokemon_key = \"Glaceon\"\n #\n # # This is one of the many ways to check if a certain key exists in a dictionary!\n try:\n # # This step could potentially fail, so we \"try\" it first.\n my_favorite_pokemon = pokedex[pokemon_key]\n except KeyError:\n # # If it does fail under a KeyError, we'll print an error message.\n print(\"ERROR: Pokemon {} does not exist!\".format(pokemon_key))\n else:\n # # If it doesn't fail under a KeyError, we'll print the Pokemon's info!\n print(\"PRINTING {}'S INFORMATION...\".format(pokemon_key))\n for key in my_favorite_pokemon.keys():\n print(\"{}: {}\".format(key, my_favorite_pokemon[key]))", "def main():\n # P R O B L E M 2\n a_random_pokemon = create_entry(81, \"Magnemite\", \"Electric\", \"Steel\", 25, 35, 70, 95, 55, 45, 1, False)\n\n for key in a_random_pokemon.keys():\n print(\"{}: {}\".format(key, a_random_pokemon[key]))\n\n # print(a_random_pokemon[\"Battle Stats\"])\n # print(a_random_pokemon[\"Battle Stats\"][\"HP\"])\n # print(a_random_pokemon[\"Battle Stats\"][\"Attack\"])\n\n print() # For formatting\n # P R O B L E M 3\n filepath = \"pokemon.csv\"\n pokedex = create_pokedex(filepath)\n pokemon_key = \"Glaceon\"\n\n # This is one of the many ways to check if a certain key exists in a dictionary!\n try:\n # This step could potentially fail, so we \"try\" it first.\n my_favorite_pokemon = pokedex[pokemon_key]\n except KeyError:\n # If it does fail under a KeyError, we'll print an error message.\n print(\"ERROR: Pokemon {} does not exist!\".format(pokemon_key))\n else:\n # If it doesn't fail under a KeyError, we'll print the Pokemon's info!\n print(\"PRINTING 
{}'S INFORMATION...\".format(pokemon_key))\n for key in my_favorite_pokemon.keys():\n print(\"{}: {}\".format(key, my_favorite_pokemon[key]))", "def run():\n\n title_list = [\"* id of item\",\n \"* title\",\n \"* price\",\n \"* month of the sale\",\n \"* day of the sale\",\n \"* year of the sale\",\n \"* customer's id\"]\n\n # ! sign with a position is unfinished function but added in options\n # !8. Show the sale numbers of games for each customer-292\n # !11. Show the customer who spent the most and the amount spent-365\"\n # !12. Show the customer's id who spent the most and the amount spent-376\"\n # !13. Show the most frequent buyers-387\n # !14. Show the if of the most freuent buyers-\n\n options = [\"Print table\",\n \"Get game title by id\",\n \"Show the most recently sold game\",\n \"Get the sum of games' prices by their id\",\n \"Get the customer's id by the id of a game\",\n \"Show ids of all customers who purchased games\",\n \"Show sale ids of all customers\",\n \"Show the owner of a recently sold game\",\n \"Show the owner's id of a recently sold game\",\n \"Show the most frequent buyers\",\n \"Show the ids of the most frequent buyers\",\n \"Get the customer by id\"]\n\n os.system('clear')\n file = \"model/sales/sales.csv\"\n choice = None\n while choice != \"0\":\n os.system('clear')\n terminal_view.print_predator()\n terminal_view.print_menu(\"What do you want to do:\", options, \"Back to main menu\")\n choice = terminal_view.get_choice(options)\n\n if choice == \"1\":\n os.system(\"clear\")\n common.all_print_table(title_list, file)\n\n elif choice == \"2\":\n os.system(\"clear\")\n print(\"Get game title by id\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n identification = common.get_input(\"Enter the id: \")\n print(sales.get_title_by_id_from_table(table, identification))\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"3\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n most_recently_sold_game = sales.get_item_id_title_sold_last(table)\n print(\"The most recently sold game is: \")\n terminal_view.print_table([most_recently_sold_game], [\"* id\", \"* title\"])\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"4\":\n os.system(\"clear\")\n print(\"Get the sum of games' prices by their id\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n item_ids = []\n x = True\n while x:\n add_id = common.get_input(\"Enter the id or 'x' to exit: \")\n if add_id == \"x\":\n x = False\n item_ids.append(add_id)\n print(sales.get_the_sum_of_prices_from_table(table, item_ids))\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"5\":\n os.system(\"clear\")\n print(\"Get the customer's id by the id of a game\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n sale_id = common.get_input(\"Enter the id of a game: \")\n print(sales.get_customer_id_by_sale_id_from_table(table, sale_id))\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"6\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n ids_of_all_customers = 
sales.get_all_customer_ids_from_table(table)\n print(\"ids of all customers who purchased games:\\n\", ids_of_all_customers)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"7\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n sale_ids_of_all_customers = sales.get_all_sales_ids_for_customer_ids_form_table(table)\n print(\"Sale ids of all customers:\\n\\n\", sale_ids_of_all_customers)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"8\":\n file_name_sales = common.get_double_file(\"Choose a file with sales: \")\n if file_name_sales == \"\":\n file_name_sales = file\n file_name_customer = common.get_double_file(\"Choose a file with customers: \")\n if file_name_customer == \"\":\n file_name_customer = \"model/crm/customers.csv\"\n table_from_customers = common.get_table_from_file(file_name_customer)\n table_from_sales = common.get_table_from_file(file_name_sales)\n last_buyer = sales.get_the_last_buyer_name(table_from_customers, table_from_sales)\n print(\"Owner of a recently sold game: \", last_buyer)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"9\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n last_buyer_id = sales.get_the_last_buyer_id(table)\n print(\"Owner's id of a recently sold game: \", last_buyer_id)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"10\":\n file_name_sales = common.get_double_file(\"Choose a file with sales: \")\n if file_name_sales == \"\":\n file_name_sales = file\n file_name_customer = common.get_double_file(\"Choose a file with customers: \")\n if file_name_customer == \"\":\n file_name_customer = \"model/crm/customers.csv\"\n table_from_customers = common.get_table_from_file(file_name_customer)\n table_from_sales = common.get_table_from_file(file_name_sales)\n the_most_frequent_buyers = sales.get_the_most_frequent_buyers_names(table_from_customers,\n table_from_sales,\n num=1)\n print(\"The most frequent buyers:\\n\", the_most_frequent_buyers)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"11\":\n file_name = common.get_file()\n if file_name == \"\":\n file_name = file\n table = common.get_table_from_file(file_name)\n the_most_frequent_buyers_ids = sales.get_the_most_frequent_buyers_ids(table, num=1)\n print(\"ids of the most frequent buyers:\\n\", the_most_frequent_buyers_ids)\n common.waiting()\n os.system(\"clear\")\n\n elif choice == \"12\":\n os.system(\"clear\")\n print(\"Get the customer by id\\n\")\n file_name = common.get_file()\n if file_name == \"\":\n file_name = \"model/crm/customers.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, [\"* id\", \"* name\", \"* email\", \"* subscribed\"])\n identification = common.get_input(\"Enter the id: \")\n print(crm.get_name_by_id_from_table(table, identification))\n common.waiting()\n os.system(\"clear\")\n\n else:\n if choice != \"0\":\n terminal_view.print_error_message(\"There is no such choice.\\n\")\n common.waiting()", "def main(args):\n \n args_are_valid, input_filepath, output_filepath, base_url, message = handle_arguments(args)\n if not args_are_valid:\n return print(message)\n \n with open(input_filepath, newline=\"\") as input_csv:\n csvreader = csv.reader(input_csv, delimiter=\",\",)\n\n needed_input_columns = [\"Account ID\",\"First Name\", \"Created On\"]\n needed_output_columns = [\"Account ID\",\"First Name\", \"Created On\", 
\"Status\", \"Status Set On\"]\n headers = next(csvreader) #grab first row as headers\n if not set(needed_input_columns).issubset(headers):\n print('ERROR - input csv must contain columns [\"Account ID\",\"First Name\", \"Created On\"] as headers')\n\n with open(output_filepath, mode = \"w\", newline = \"\") as output_csv:\n csvwriter = csv.DictWriter(output_csv, fieldnames = needed_output_columns)\n csvwriter.writeheader()\n\n index_of = {}\n for index,header in enumerate(headers):\n index_of[header] = index\n write_dict = {}\n\n #Loop through inputfile\n for row in csvreader:\n still_valid = True\n if len(row) != len(headers):\n message = \"ERROR - csv row has incomplete data\"\n still_valid = False\n if still_valid:\n # extract data from row, columns can be in any order\n for column in needed_input_columns:\n write_dict[column] = row[index_of[column]]\n still_valid, write_dict, message = verify_and_clean_input(write_dict)\n if still_valid:\n write_dict, message = extend(write_dict, query(write_dict[\"Account ID\"], base_url))\n #only write to csv if all input data valid, query data nulled out if invalid\n csvwriter.writerow(write_dict) \n print(message)\n\n output_csv.close() \n input_csv.close()", "def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ", "def main():\n movies = read_movies('bond.csv')\n\n print('Original list (first 10):')\n print_movies(movies[:10])\n\n sorted_movies = movie_sort(movies)\n print('\\nSorted list (by year, first 10):')\n print_movies(sorted_movies[:10])\n\n bonus(movies)", "def 
load_data(filename):\n #Admittedly copy-pasted from Heredity project cuz I'm resourceful like that\n #Makes 2 lists, one for evidence and one for labels\n evidence = []\n labels = []\n #Open csv file\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n #Iterate through user rows of file\n for row in reader:\n i = 0\n tmp_list = []\n for column in row:\n if i in [0,2,4,11,12,13,14]:\n column = int(column)\n if i in [1,3,5,6,7,8,9]:\n column = float(column)\n if i == 10:\n if column == \"Jan\":\n column = 0\n if column == \"Feb\":\n column = 1\n if column == \"Mar\":\n column = 2\n if column == \"Apr\":\n column = 3\n if column == \"May\":\n column = 4\n if column == \"June\":\n column = 5\n if column == \"Jul\":\n column = 6\n if column == \"Aug\":\n column = 7\n if column == \"Sep\":\n column = 8\n if column == \"Oct\":\n column = 9\n if column == \"Nov\":\n column = 10\n if column == \"Dec\":\n column = 11\n if i in [15,16]:\n if column == \"Returning_Visitor\" or column == \"TRUE\":\n column = 1\n else:\n column = 0\n if i == 17:\n if column == \"TRUE\":\n column = 1\n else:\n column = 0\n labels.append(column)\n else:\n tmp_list.append(column)\n i+=1\n evidence.append(tmp_list)\n \n return (evidence,labels)", "def main():\n download_files()\n data, validCountries = clean_data()\n while True:\n choice = pyip.inputMenu([\"Confirmed\", \"Deaths\", \"Recovered\", \"Confirmed per capita\", \"Deaths per capita\", \"Recovered per capita\", \"See countries\", \"Quit\"], numbered=True)\n if choice == \"Quit\":\n break\n elif choice == \"See countries\":\n for country in validCountries:\n print(country)\n continue\n response = pyip.inputStr(\"Enter countries of interest (separate by commas): \")\n countries = response.split(\", \")\n for country in countries:\n if country not in validCountries: #check if countries are valid (in dataframe)\n countries.remove(country)\n print(f\"{country} is not a valid entry\")\n\n choiceDict = {\"Confirmed\": 0, \"Deaths\": 1, \"Recovered\": 2, \"Confirmed per capita\": 3, \"Deaths per capita\": 4, \"Recovered per capita\": 5}\n\n fig, ax = plt.subplots()\n\n for country in countries:\n data[choiceDict[choice]].T[country].plot(ax=ax)\n ax.legend(countries)\n plt.xlabel(\"Date\")\n label = choice\n if label != \"Deaths\":\n label += \" cases\"\n plt.ylabel(f\"Number of {label.lower()}\")\n plt.title(f\"Number of {label.title()}\")\n plt.show()", "def main():\n\n program_run = True\n\n while program_run:\n item = 1 # Column A\n expense = 2 # Column B\n income = 3 # Column C\n month = 4 # Column D\n row_numbers = {\n \"a\": 5, # Row of items\n \"e\": 0\n }\n\n error_message = \"\"\"\n Sorry! I don't seem to be able to carry out the request you gave me, please\n try again and give a valid argument (this program is case sensitive)\n \"\"\"\n choice_q = \"\"\"\n Would you like to Look at data options or put some data into your budget?\n [R/W]\n \"\"\"\n read_q = \"\"\"\n What information would you like to access?\n Total income[ti]\n Total expences[te]\n Profit[p]\n All[a]\n \"\"\"\n write_q = \"Have you sold or bought an item? 
[s/b] (q to quit)\"\n type = \"type 1 to read data, 2 to write data or q to quit: \"\n\n input1 = input(type) # Asks user whether they want to read or write info\n if input1 == \"q\": # Allows the user to quit at any given time\n program_run = False\n elif input1 == \"1\": # input chosen 'read'\n while input1 == \"1\":\n input2 = input(read_q) # Asks user on info regarding reading info\n if input2 == \"ti\": # Prints total income for the user\n print(\"\\tYour total income is: \" + cell_ti)\n break\n elif input2 == \"te\": # Prints total expenses for the user\n print(\"\\tYour total expences are: \" + cell_te)\n break\n elif input2 == \"p\": # Prints total profit for user, if Profit\n if cell_p <= 0: # below 0, user will get 'in debt' message.\n print(\"\\tYou're currently \" + cell_p + \" in debt.\")\n break\n else:\n print(\"\\tYour total profit is: \" + cell_p)\n break\n elif input2 == \"a\": # User will get all of the information above\n print(\"\\tYour total income is: \" + cell_ti + '\\n' +\n \"\\tYour total expences are: \" + cell_te + '\\n' +\n \"\\tYour total profit is: \" + cell_p)\n break\n else:\n print(error_message)\n else:\n break\n elif input1 == \"2\": # Input chosen 'write'\n while input1 == \"2\":\n input3 = input(write_q)\n if input3 == \"s\": # user sold something\n with open(\"row_used.json\") as ru:\n number = json.load(ru)\n a = 0\n a += int(number) # Code used to contantly move\n a += 1 # down rows when inputting new\n # data so that the data doesn't\n # overlap.\n with open(\"row_used.json\", 'w') as ru:\n ru.truncate(0)\n json.dump(a, ru)\n\n item_sold = input(\"What did you sell?: \")\n sheet.update_cell(a,item, item_sold)\n sheet.update_cell(a,expense, row_numbers['e']) # This 'e'(0) is here since the user didn't actually lose\n income_price = input(\"How much did you sell it for?: \") # any money, it will fill in the cell marked 'expences'\n sheet.update_cell(a,income, income_price) # to 0\n month_sold = input(\"In what month did you make the sale?(eg. Aug): \")\n sheet.update_cell(a,month, month_sold)\n\n ru.close()\n elif input3 == \"b\": # User bought something\n with open(\"row_used.json\") as ru:\n number = json.load(ru)\n a = 0\n a += int(number) # Code used to contantly move\n a += 1 # down rows when inputting new\n # data so that the data doesn't\n # overlap.\n with open(\"row_used.json\", 'w') as ru:\n ru.truncate(0)\n json.dump(a, ru)\n\n item_bought = input(\"What did you buy?: \")\n sheet.update_cell(a,item, item_bought)\n item_expense = input(\"How much was the item?: \")\n sheet.update_cell(a,expense, item_expense)\n sheet.update_cell(a,income, row_numbers['e']) # again 'e' is the value 0 since user isn't making income\n month_sold = input(\"In what month did you make the sale?(eg. 
Aug): \")\n sheet.update_cell(a,month, month_sold)\n\n ru.close()\n\n\n elif input3 == \"q\":\n program_run = False\n break\n\n else:\n print(error_message)", "def main3():\r\n #open the file\r\n with open('csvfile1.csv', 'r') as csvfile1:\r\n #use DictReader method from csv module\r\n csv_reader = csv.DictReader(csvfile1)\r\n #read the lines\r\n for line in csv_reader:\r\n print(line['email'])", "def run():\n\n options = [\"Add new record to table\",\n \"Remove a record with a given id from the table\",\n \"Update specified record in the table\",\n \"Number of different kinds of game that are available of each manufacturer\",\n \"Average amount of games in stock of a given manufacturer\",\n \"Print table\"]\n\n title_list = [\"*id\",\n \"* title\",\n \"* manufacturer\",\n \"* price\",\n \"* in_stock\"]\n os.system('clear')\n choice = None\n while choice != \"0\":\n terminal_view.print_menu(\"What do you want to do:\", options, \"Back to main menu\")\n choice = terminal_view.get_choice(options)\n os.system('clear')\n if choice == \"1\":\n # to jest dzialajacy plik model/store/games.csv\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n record = terminal_view.get_inputs(title_list, \"Enter data: \")\n table = store.add(table, record)\n common.write_table_to_file(file_name, table)\n os.system(\"clear\")\n terminal_view.gprint('*** Record has been added ***')\n common.waiting()\n os.system(\"clear\")\n elif choice == \"2\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n id_ = common.get_input(\"Get id to removed: \")\n table = store.remove(table, id_)\n common.write_table_to_file(file_name, table)\n os.system(\"clear\")\n terminal_view.gprint('*** Record has been removed ***')\n common.waiting()\n os.system(\"clear\")\n elif choice == \"3\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n id_ = common.get_input(\"Enter id to update: \")\n record = terminal_view.get_inputs(title_list, \"Enter data: \")\n table = store.update(table, id_, record)\n common.write_table_to_file(file_name, table)\n os.system(\"clear\")\n terminal_view.gprint('*** Record has been updated ***')\n common.waiting()\n os.system(\"clear\")\n elif choice == \"4\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n dictionary = store.get_counts_by_manufacturers(table)\n terminal_view.print_dictionary(\"Number of different kinds of game that are\" +\n \"available of each manufacturer:\", dictionary)\n common.waiting()\n os.system(\"clear\")\n elif choice == \"5\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == \"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n manufacturer = common.get_input(\"Enter a manufacturer: \")\n print(store.get_average_by_manufacturer(table, manufacturer))\n common.waiting()\n os.system(\"clear\")\n elif choice == \"6\":\n file_name = common.get_input(\"Choose a file: \")\n if file_name == 
\"\":\n file_name = \"model/store/games.csv\"\n table = common.get_table_from_file(file_name)\n terminal_view.print_table(table, title_list)\n common.waiting()\n os.system(\"clear\")\n\n else:\n if choice != \"0\":\n terminal_view.print_error_message(\"There is no such choice.\")", "def main():\n filename = \"data/exercise.csv\"\n analyze(filename)", "def main():\n # Initialize key variables\n alldata = [[\n 'Meet', 'City', 'Country', 'Course', 'Event ID', 'Distance', 'Stroke',\n 'Round', 'Gender', 'Firstname', 'Lastname', 'Birthyear', 'Height cm',\n 'Weight Kg', 'BMI', 'Speed / Kg', 'Speed m/s', 'Time']]\n finadata = []\n olympicdata = []\n ts_start = int(time.time())\n\n # Get filename\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-l', '--lenex_directory',\n help='Name of directory with LENEX XML files.',\n type=str, required=True)\n parser.add_argument(\n '-o', '--olympic_directory',\n help='Name of directory with Olympic XLSX files.',\n type=str, required=True)\n parser.add_argument(\n '-p', '--profile_directory',\n help='Name of directory with athlete profiles.',\n type=str, required=True)\n parser.add_argument(\n '-d', '--database_file',\n help='Name of database file.',\n type=str, required=True)\n args = parser.parse_args()\n lenex_directory = args.lenex_directory\n profile_directory = args.profile_directory\n database_file = args.database_file\n olympic_directory = args.olympic_directory\n\n # Get the profiles\n profiles = _read_profiles(profile_directory)\n\n # Process Fina data\n finadata = _lenex(lenex_directory, profiles)\n\n # Process Olympic data\n olympicdata = _olympic(olympic_directory, profiles)\n\n # Get all data\n alldata.extend(finadata)\n alldata.extend(olympicdata)\n\n # Create output file\n with open(database_file, 'w') as f_handle:\n writer = csv.writer(f_handle, delimiter='|')\n writer.writerows(alldata)\n\n # Print status\n print('Swimmer event results created: {}'.format(len(alldata) - 1))\n print('Duration: {}'.format(int(time.time() - ts_start)))", "def execute(self, args: Namespace):\n data = {}\n\n with open(args.data, 'r') as file:\n reader = csv.reader(file, delimiter=';')\n\n for row in reader:\n if reader.line_num == 1:\n if 'Timestamp, Decimal, TriState, State' != ','.join(row):\n print('Not a valid CSV file!')\n return\n\n else:\n dt = datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S.%f')\n value = tri_state_value(row[2])\n device = data.setdefault(tri_state_device(row[2].strip()), {})\n device.setdefault(dt.date(),[]).append((dt.strftime('%H:%M'), value))\n\n for k in sorted(data.keys()):\n device = data[k]\n print('Device {}:'.format(tint_yellow(k)))\n\n for d in sorted(device.keys()):\n device[d].sort(key=lambda x: x[0])\n print('\\t{}:'.format(tint_blue(d)))\n\n for (t, v) in device[d]:\n if v:\n print('\\t\\tAt {} the device was turned {}.'.format(tint_blue(t),\n tint_green('ON')))\n else:\n print('\\t\\tAt {} the device was turned {}.'.format(tint_blue(t),\n tint_red('OFF')))", "def userReport():\n for player, dat in players.items():\n if \"Arca\" not in dat and \"Observatorio\" not in dat \\\n and \"Atomium\" not in dat and \"Dirigible\" not in dat \\\n and \"Estatua\" not in dat and \"Baño\" not in dat:\n continue\n\n print(\"-------------------------------------------------------------\")\n print(player, \" - \", dat[\"Edad\"])\n if \"Arca\" in dat:\n print(\" Arca %i\" % dat[\"Arca\"])\n if \"Observatorio\" in dat:\n print(\" Observatorio %i\" % dat[\"Observatorio\"])\n if \"Atomium\" in dat:\n print(\" Atomium %i\" % 
dat[\"Atomium\"])\n\n if \"Estatua\" in dat:\n for ed in dat[\"Estatua\"]:\n print(\" Estatua %i - %s\" % (ed[\"Nivel\"], ed[\"Edad\"]))\n\n if \"Dirigible\" in dat:\n for ed in dat[\"Dirigible\"]:\n print(\" Dirigible 11 - %s\" % ed)\n if \"Baño\" in dat:\n print(\" Baño Real %i - %s\" % (\n dat[\"Baño\"][\"Nivel\"], dat[\"Baño\"][\"Edad\"]))\n\n print()", "def print_records(results_file, player_1, player_2):\n # keep track of the results in the file\n results_lines = []\n\n # read all of the lines from the file into a list\n with open(results_file) as f:\n results_lines = f.readlines()\n\n # parse the results (results will be a dictionary of string and tuple)\n # { string->name: tuple->(int->wins, int->losses) }\n # { 'reed': (2, 5), 'britney': (5, 2) }\n results = parse_results(results_lines)\n\n player_1_wins = results[player_1][0]\n player_1_losses = results[player_1][1]\n player_2_wins = results[player_2][0]\n player_2_losses = results[player_2][1]\n\n print \"\\n%s's record is %d wins and %d losses\" % (player_1, player_1_wins, player_1_losses)\n print \"\\n%s's record is %d wins and %d losses\" % (player_2, player_2_wins, player_2_losses)", "def main():\n args = parse_arguments()\n\n ##### READ AND PREPROCESS DATA #####\n mentors, candidates, mentors_years, candidates_years = read_and_preprocess(args.mentor_file, args.candidate_file, args.num_preferences)\n\n # Store a copy of the initially declared preferences (after filtering)\n # This is used later to see how many people didn't get someone in their top \n candidate_dict_initial = list_to_dictionary(candidates)\n mentor_dict_initial = list_to_dictionary(mentors)\n\n ##### PAIR BASED ON FIRST CHOICES #####\n successful_pairings = match_top_choices(list_to_dictionary(mentors), list_to_dictionary(candidates))\n print(\"Paired {} people based on first choices\".format(len(successful_pairings)))\n remove_pairings(successful_pairings, mentors, candidates)\n\n ##### PAIR BASED ON ONE-SIDED PREFERENCE #####\n ## ONE SIDED CANDIDATES\n one_sided_candidate_pairings = match_no_preference(candidates, mentors)\n print(\"Paired {} people based on candidates with no mentor preferences\".format(len(one_sided_candidate_pairings)))\n remove_pairings(one_sided_candidate_pairings, candidates, mentors)\n\n ## ONE SIDED MENTORS\n one_sided_mentor_pairings = match_no_preference(mentors, candidates)\n print(\"Paired {} people based on mentors with no candidate preferences\".format(len(one_sided_mentor_pairings)))\n remove_pairings(one_sided_mentor_pairings, mentors, candidates)\n\n # Fill the remaining preference list with the valid unspecified mentors/candidates\n mentors_filled, candidates_filled = fill_with_valid(mentors, candidates, mentors_years, candidates_years)\n\n candidate_dict = list_to_dictionary(candidates_filled)\n mentor_dict = list_to_dictionary(mentors_filled)\n\n stable_matches = stable_matching.stable_marriage(mentor_dict, candidate_dict)\n stable_match_success = (len(stable_matches) != 0)\n if stable_match_success:\n print(\"Paired {} people based on stable matching\".format(len(stable_matches)))\n else:\n print(\"Warning: Could not determine a stable match with the optimizations.\")\n print(\"Attempting to stable match without...\")\n mentors, candidates, mentors_years, candidates_years = read_and_preprocess(args.mentor_file, args.candidate_file, args.num_preferences)\n # Fill the remaining preference list with the valid unspecified mentors/candidates\n mentors_filled, candidates_filled = fill_with_valid(mentors, candidates, 
mentors_years, candidates_years)\n\n candidate_dict = list_to_dictionary(candidates_filled)\n mentor_dict = list_to_dictionary(mentors_filled)\n\n # Stable match immediately\n stable_matches = stable_matching.stable_marriage(mentor_dict, candidate_dict)\n if len(stable_matches) == 0:\n print(\"Error: Could not stable match these preference lists.\")\n return\n\n # Combine the pairings from all sources into a single list\n if stable_match_success:\n all_pairings = []\n for mentor, candidate in successful_pairings:\n all_pairings.append([mentor, candidate, \"Paired based on first choice\"])\n for mentor, candidate in one_sided_mentor_pairings:\n all_pairings.append([mentor, candidate, \"Paired based on one-sided mentors\"])\n for candidate, mentor in one_sided_candidate_pairings:\n all_pairings.append([mentor, candidate, \"Paired based on one-sided candidates\"])\n for mentor, candidate in stable_matches:\n mentor_name = mentor\n if mentor is None:\n mentor_name = \"No mentor\"\n candidate_name = candidate\n if candidate is None:\n candidate_name = \"No candidate\"\n all_pairings.append([mentor_name, candidate_name, \"Paired based on stable matching\"])\n else:\n all_pairings = stable_matches\n\n output_file = \"pairings.csv\"\n if args.output_file:\n output_file = args.output_file\n with open(output_file, \"w+\") as f:\n f.write(\"Mentor,Candidate,Notes\\n\")\n for mentor, candidate, notes in all_pairings:\n f.write(\"{},{},{}\\n\".format(mentor, candidate, notes))", "def main():\n # openfile allows for CSV files with stored data of two columns\n # data = openfile(\"filename\")\n data = get_data()\n abtest = AB_test(data)\n abtest.stats()\n abtest.print_stats()", "def main():\n parser = ArgumentParser(usage='%(prog)s [options] ecommonsMetadata.csv')\n parser.add_argument(\"-d\", \"--date\", dest=\"date\",\n help=\"Date on or after that an ETD was published for \\\n creating DOIs. 
Put in format YYYY-MM\")\n parser.add_argument(\"datafile\", help=\"eCommons metadata worked from.\")\n\n args = parser.parse_args()\n\n if not len(sys.argv) > 0:\n parser.print_help()\n parser.exit()\n\n workingdir = csvparse(args.datafile, args.date)\n doiparse(workingdir)\n print('ANVL files available in: ' + workingdir)", "def main():\n\n args = get_args()\n random.seed(args.seed)\n wod = []\n\n for name, low, high in read_csv(args.file):\n reps = random.randint(low, high)\n if args.easy:\n reps = int(reps / 2)\n wod.append((name, reps))\n\n wod = random.sample(wod, k=args.num_exercises)\n print(tabulate(wod, headers=('Exercise', 'Reps')))", "def interactor_finder():\n from tools import prot_id_converter\n\n proteinList = []\n with open(\"../datafiles/known_interactors.txt\",\"r\") as inpProt: # create list of gene names from hand-made text file with known ptp22 interactors\n for protLine in inpProt:\n if protLine != \"\\n\":\n curName = protLine.strip().split(\"\\t\")[0]\n curName = curName[0] + curName[1:].lower()\n proteinList.append(curName)\n inpIdL = prot_id_converter(proteinList, \"10090\", \"genesymbol\", \"uniprotaccession\") # convert to uniprot accessions\n print(inpIdL)\n \n with open(\"../bob/processed/bobprots_all.csv\",\"r\") as targetF: # create list of all uniprot accessions in Bob's dataset (unique razor proteins only)\n targetD = {}\n for targetLine in targetF:\n targetD[targetLine.split(\",\")[0]] = targetLine.split(\",\")[1].strip()\n for inpIdItem in inpIdL:\n for queryI in inpIdItem:\n if queryI in targetD:\n print(targetD[queryI])\n break", "def main():\n \n Y1, Y2 = 2005, 2017 ### range with coordinates supplied in pre-2018 generated archive\n\n if len(sys.argv) > 1 and int(sys.argv[1]) > 0:\n Y1 = int(sys.argv[1])\n \n if len(sys.argv) > 2 and int(sys.argv[2]) > Y1:\n Y2 = int(sys.argv[2])\n \n with open('data/audit.log','w') as output:\n for Y in range(Y1, Y2):\n df = pd.read_csv('data/{}.csv'.format(Y), low_memory = False)\n output.write('\\n--- {} --------------------\\n'.format(Y))\n\n # remove `deleted` records\n df['deleted'] = df['deleted'].apply(yes_no)\n df = df[df['deleted'] == 0]\n\n # remove misc misdemeanors\n df = df[~df['category'].isin(drop)]\n\n # validate date and expand into Y,N,D,W,H\n df['dt'] = df['incident_date'].apply(extract)\n df = df[~df['dt'].isnull()]\n\n # convert from plane state to longitude-latitude\n df['ll'] = df.apply(to_lnglat, axis = 1)\n\n # init features\n features = df.loc[:,['category','stat','address','city','zip']]\n features['id'] = df['incident_id']\n dt = ['year','month','day','weekday','hour']\n for i in range(len(dt)):\n features[dt[i]] = df['dt'].apply(lambda x: x[i] )\n\n features['lng'] = df['ll'].apply(lambda x: x[0])\n features['lat'] = df['ll'].apply(lambda x: x[1])\n\n features['gang'] = df['gang_related'].apply(yes_no)\n features['category'] = df['category'].apply(collapse)\n cat = set(features.groupby(['category']).size().reset_index(name='count')['category'].tolist())\n output.write('Categories: {}\\n'.format(len(cat)))\n\n output.write('Date miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['year'] > 2000) & (~features['weekday'].isnull())])/len(features))))\n output.write('Location miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['zip'] > 0) | (features['lat'] > 0)])/len(features))))\n\n # keep records with valid date\n features['date'] = df['dt'].apply(lambda x: datetime.date(x[0], x[1], x[2]))\n features = features[(features['year'] > 2000) & 
(~features['weekday'].isnull())]\n output.write('Time miss: {:.4f}%\\n'.format(100 * len(features[features['hour'] == -1])/len(features)))\n\n # potential `time-unknown` issue\n output.write('Hour ZERO: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 0])/len(features)))\n output.write('Hour NOON: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 12])/len(features)))\n\n features = features[(features['zip'] > 0) | (features['lat'] > 0)]\n\n # get the best possible coordinates + zipcode assessment\n features[['zip','lng','lat']] = features[['zip','lng','lat']].apply(fix_location, axis = 1)\n output.write('Failed location: {:.4f}%\\n'.format(100 * len(features[features['zip'].isnull()])/len(features)))\n features = features[~features['zip'].isnull()]\n features['zip'] = df['zip'].apply(lambda x: str(x)[:5])\n \n # normalize city attr\n features = features.join(zipcodes[['zip','city']].set_index('zip'), on = 'zip', lsuffix = '_orig', rsuffix = '')\n features.loc[features['city'].isnull(), 'city'] = features.loc[features['city'].isnull(), 'city_orig']\\\n .apply(lambda x: x if type(x) == float else ' '.join([l[0].upper() + l[1:] for l in x.split()]))\n\n # reduce to LA bounding-box\n features = features[(features['lng'] > -119) & (features['lng'] < -116)]\n features = features[(features['lat'] > 32) & (features['lat'] < 35)]\n\n # save csv\n features[fields].to_csv('data/F{}.csv'.format(Y), index = False)\n features[fields].to_json('data/F{}.json'.format(Y), orient = 'records')\n output.close()", "def main():\n\n if len(sys.argv) != 3:\n print(main.__doc__)\n sys.exit()\n\n new = read_file(sys.argv[1])\n reference = read_file(sys.argv[2])\n\n check_items(new, reference)", "def main():\n\n #get the csv file into a data-frame\n universities_df = pd.read_csv('universities_data.csv', encoding = 'utf-8-sig')\n universities_names_list = universities_df['name'].tolist()\n\n #get list of university objects\n url = 'http://universities.hipolabs.com/search?country=Israel'\n api_universities = Get_universities(url)\n list_of_universities = api_universities.get_universities_info()\n\n #to see if we got new entities or not for exporting to csv later..\n is_new_entities = False\n\n for university in list_of_universities:\n if university.name not in universities_names_list:\n is_new_entities = True\n universities_df= universities_df.append(pd.DataFrame({\n 'alpha_two_code': [university.alpha_two_code], \n 'country': [university.country],\n 'web_pages': [str(university.web_pages)],\n 'domains': [str(university.domains)],\n 'name': [university.name],\n 'state_province':[str(university.state_province)]}) , ignore_index = True)\n\n #export back to csv if true\n if is_new_entities: \n print('we got new entities!') \n universities_df.to_csv('universities_data.csv', encoding = 'utf-8-sig', index = False)\n else:print('no new universities for now!')", "def main():\n\n print(\"--------------------\")\n print(\"| codedrome.com |\")\n print(\"| Percentile Ranks |\")\n print(\"--------------------\\n\")\n\n try:\n\n f_in = open(\"grades.csv\")\n r = csv.DictReader(f_in, fieldnames=['grade'])\n\n grades = []\n\n for item in r:\n grades.append(item['grade'])\n\n f_in.close()\n\n percentile_ranks = percentileranks.calculate_percentile_ranks(grades)\n\n percentileranks.print_percentile_ranks(percentile_ranks)\n\n except Exception as e:\n\n print(e)", "def run(filename=utils.MOD_FILE+utils.FILE_EXTENSION):\n # Exception handling in case the logfile doesn't exist\n try:\n data = 
pd.io.parsers.read_csv(filename, encoding=\"utf-8-sig\")\n except OSError as e:\n print(\"ERROR: \" + filename + \" does not exist. Did you run logfileSPOC.py?\")\n\n conditions = [utils.COL_VOTING, utils.COL_PROMPTS]\n\n user_input = input(\"> Print descriptive statistics? [y/n]: \")\n if is_yes(user_input):\n descriptive_stats(data[conditions+[utils.COL_NUM_COMMENTS]].dropna())\n user_input = input(\">> Display descriptive statistics plot? [y/n]: \")\n if is_yes(user_input):\n compare_plot_instances(data[conditions])\n\n user_input = input(\">> Display descriptive plot of \" + utils.COL_NUM_COMMENTS + \"? [y/n]: \")\n if is_yes(user_input):\n descriptive_plot(data[[utils.COL_NUM_COMMENTS]])\n\n user_input = input(\"> Display comparison plots of conditions -> \"+utils.COL_NUM_COMMENTS+\"? [y/n]: \")\n if is_yes(user_input):\n compare_plot_outcome(data[conditions+[utils.COL_NUM_COMMENTS]].dropna())\n\n user_input = input(\"> Print t-test statistics for all conditions? [y/n]: \")\n if is_yes(user_input):\n t_test(data[conditions+[utils.COL_NUM_COMMENTS]].dropna(), utils.COND_PROMPT_POS, utils.COND_PROMPT_NEUTRAL)\n\n user_input = input(\"> Print One-Way ANOVA statistics for all conditions? [y/n]: \")\n if is_yes(user_input):\n one_way_anova(data[conditions+[utils.COL_NUM_COMMENTS]].dropna())\n\n user_input = input(\"> Print ANCOVA statistics for all conditions (num prompts as covariate)? [y/n]: \")\n if is_yes(user_input):\n ancova(data[conditions+[utils.COL_NUM_PROMPTS, utils.COL_NUM_COMMENTS]].dropna())\n\n user_input = input(\"> Print Two-Way ANOVA Interaction \" + str(conditions) + \" statistics? [y/n]: \")\n exp_data = data[conditions + [utils.COL_NUM_COMMENTS]]\n anova_interaction(exp_data)\n if is_yes(user_input):\n anova_interaction(exp_data)\n user_input = input(\">> Display Interaction plot? 
[y/n]: \")\n if is_yes(user_input):\n plot_interaction(exp_data)", "def main(argv):\n # Question 1\n # Saves the features given in a list\n features = (argv[2].split(sep=\", \"))\n the_data = data.load_data(argv[1], features)\n statistic_functions = [sum, mean, median]\n # Saves the relevant records\n summer_data, not_summer = data.filter_by_feature(the_data, \"season\", [1])\n holiday_data, not_holiday = data.filter_by_feature(the_data, \"is_holiday\", [1])\n print(\"Question 1:\")\n print(\"Summer:\")\n data.print_details(summer_data, [\"hum\", \"t1\", \"cnt\"], statistic_functions)\n print(\"Holiday:\")\n data.print_details(holiday_data, [\"hum\", \"t1\", \"cnt\"], statistic_functions)\n print(\"All:\")\n data.print_details(the_data, [\"hum\", \"t1\", \"cnt\"], statistic_functions)\n\n # Question 2\n print(\"\\nQuestion 2\")\n print(\"If t1<=13.0, then:\")\n # Saves the relevant records\n winter_data, not_winter = data.filter_by_feature(the_data, \"season\", [3])\n w_h_data, not_w_h_data = data.filter_by_feature(winter_data, \"is_holiday\", [1])\n population_statistics(\"Winter holiday records:\", w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 0, statistic_functions[1:])\n population_statistics(\"Winter weekday records:\", not_w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 0, statistic_functions[1:])\n print(\"If t1>13.0, then:\")\n population_statistics(\"Winter holiday records:\", w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 1, statistic_functions[1:])\n population_statistics(\"Winter weekday records:\", not_w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 1, statistic_functions[1:])", "def main():\n splitted_file = convert_input_to_list()\n encyclopedia_of_pizza = parse_pizza_info(splitted_file)\n pizza_winner = choose_pizza(encyclopedia_of_pizza)\n print_winner(pizza_winner)", "def check_my_books(main_page):\n\n login = main_page.login\n\n # rented.csv = [ID, rental_date, return_date, login]\n with open('rented.csv', 'r') as rented_base:\n rented_reader = csv.reader(rented_base)\n next(rented_reader)\n\n books_table = []\n\n for line in rented_reader:\n if line[-1] == login:\n books_table.append([line[0],line[1],line[2]])\n\n print(\"Your rented books are:\")\n\n books_table_reader(books_table)", "def cli(context, repeats_file):\n\n repeat_information = {}\n with open(repeats_file, 'r') as file_handle:\n repeat_information = parse_repeat_file(file_handle, repeats_file_type='json')\n\n if not repeat_information:\n LOG.warning(\"Could not find any repeat info\")\n context.abort()\n\n header = [\"HGNCId\", \"LocusId\", \"DisplayRU\", \"InheritanceMode\", \"normal_max\", \"pathologic_min\", \"Disease\", \"SourceDisplay\", \"SourceId\"]\n table_line = \"| {0} | {1} | {2} | {3} | {4} | {5} | {6} | {7} | {8} |\"\n click.echo(table_line.format(\n header[0], header[1], header[2], header[3], header[4], header[5], header[6], header[7], header[8]\n ))\n click.echo(table_line.format('-------', '-------', '-------', '-------', '-------',\n '-------', '-------', '-------', '-------' ))\n for entry in repeat_information:\n click.echo(table_line.format(\n repeat_information[entry][header[0]],\n entry,\n repeat_information[entry][header[2]],\n repeat_information[entry][header[3]],\n repeat_information[entry][header[4]],\n repeat_information[entry][header[5]],\n repeat_information[entry][header[6]],\n repeat_information[entry][header[7]],\n repeat_information[entry][header[8]],\n ))", "def open_file():\r\n\tr_ct = 0\r\n\t\r\n\twith open('flavors_of_cacao.csv', 'r') as csvfile:\r\n\t\tcacao_stream = 
csv.DictReader(csvfile)\r\n\t\tfor cacao_row in cacao_stream:\r\n\t\t\tr_ct += 1\r\n\t\t\t\r\n\t\t\t#quit after 100 records\r\n\t\t\tif r_ct > 100:\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t\t#pull the data out of the dictionary for sqlite3\r\n\t\t\tt_Company = cacao_row['Company']\r\n\t\t\tt_Specific_Bean_Origin = cacao_row['Specific_Bean_Origin']\r\n\t\t\tt_REF = cacao_row['REF']\r\n\t\t\tt_Review = cacao_row['Review']\r\n\t\t\tt_Cocoa = cacao_row['Cocoa']\r\n\t\t\tt_Location = cacao_row['Location']\r\n\t\t\tt_Rating = cacao_row['Rating']\r\n\t\t\tt_Bean = cacao_row['Bean']\r\n\t\t\tt_Broad_Bean_Origin = cacao_row['Broad_Bean_Origin']\r\n\t\t\t\r\n\t\t\t#print the first 15 lines\r\n\t\t\tif r_ct <= 15:\r\n\t\t\t\tprint (r_ct, t_Company, t_Bean, t_Cocoa, t_Review)\r\n\t\t\t\t\r\n\t\t\t#creates a sql cursor, formats the insert sql and executes it\r\n\t\t\tc = conn.cursor()\r\n\t\t\tstrsql = \"\"\"\r\n\t\t\t\tINSERT INTO cacao\r\n\t\t\t\t\t(Company, Specific_Bean_Origin, REF, Review, Cocoa, Location, Rating, Bean, Broad_Bean_Origin)\r\n\t\t\t\tvalues (\r\n\t\t\t\t\t'{t_Company}', '{t_Specific_Bean_Origin}', '{t_REF}', '{t_Review}', '{t_Cocoa}', '{t_Location}', '{t_Rating}', '{t_Bean}', '{t_Broad_Bean_Origin}');\r\n\t\t\t\t\"\"\".format(\r\n\t\t\t\t\tt_Company = t_Company,\r\n\t\t\t\t\tt_Specific_Bean_Origin = t_Specific_Bean_Origin,\r\n\t\t\t\t\tt_REF = t_REF,\r\n\t\t\t\t\tt_Review = t_Review,\r\n\t\t\t\t\tt_Cocoa = t_Cocoa,\r\n\t\t\t\t\tt_Location = t_Location,\r\n\t\t\t\t\tt_Rating = t_Rating,\r\n\t\t\t\t\tt_Bean = t_Bean,\r\n\t\t\t\t\tt_Broad_Bean_Origin = t_Broad_Bean_Origin\r\n\t\t\t\t\t)\r\n\t\t\tc.execute(strsql)\r\n\t\t\tconn.commit()", "def main():\r\n if len(sys.argv)==4:\r\n\r\n # files path\r\n m_file_path,c_file_path,database = sys.argv[1:]\r\n\r\n # first, read the data\r\n print('Reading the data...')\r\n df = read_data(m_file_path,c_file_path)\r\n print('OK!')\r\n print(' ')\r\n \r\n # clean it\r\n print('Cleaning the data...')\r\n df = clean_data(df)\r\n print('OK!')\r\n print(' ')\r\n \r\n # save it\r\n print('Saving data...')\r\n save_data(df,database)\r\n print(' ')\r\n \r\n # when it's done\r\n print(f'Cleaned data is stored in {database[:-3]} database') \r\n\r\n else:\r\n print('Please provide the filepaths of the messages and categories '\\\r\n 'datasets as the first and second argument respectively, as '\\\r\n 'well as the filepath of the database to save the cleaned data '\\\r\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\\\r\n 'disaster_messages.csv disaster_categories.csv '\\\r\n 'DisasterResponse.db')", "def main():\n # Check the user CLI input matches correct syntax\n try:\n # Specify the valid CLI options/arguments\n opts, _ = getopt.getopt(\n sys.argv[1:],\n \"hv\",\n [\n \"help\",\n \"verbose\",\n \"input=\",\n \"output=\",\n \"locations=\",\n \"trips=\",\n \"results=\",\n ],\n )\n except getopt.GetoptError as err:\n print(str(err))\n usage()\n sys.exit(2)\n\n # Define input arguments and initialize default values.\n input_file = \"tests/input/input1.txt\"\n output_file = \"tests/output/output1.txt\"\n locations_file = \"data/locations.csv\"\n trips_file = \"data/trips.csv\"\n results = 3\n\n # Loop through all the User CLI options/arguments\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif opt in (\"-v\", \"--verbose\"):\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n elif opt == \"--input\":\n input_file = arg\n\n if not os.path.exists(input_file):\n sys.exit(\"Could not find input file\")\n elif opt == \"--output\":\n output_file = arg\n elif opt == \"--locations\":\n locations_file = arg\n\n if not os.path.exists(locations_file):\n sys.exit(\"Could not find locations file\")\n elif opt == \"--trips\":\n trips_file = arg\n\n if not os.path.exists(trips_file):\n sys.exit(\"Could not find trips file\")\n elif opt == \"--results\":\n results = int(arg)\n\n parse(\n locations_file,\n trips_file,\n input_file,\n output_file,\n results,\n )", "def main():\n\n # start at loading the dataset\n data = h1bdata_loading()\n merged_data = pd.concat([data[year] for year in range(2010,2017)], ignore_index= True)\n raw_data = h1b_data(data)\n \n \n\n # Then clean the data\n #h1b_data = Clean_df(raw_data)\n #print(\"data cleaned >>>\")\n\n\n while True:\n try:\n print (\"================================ H1b Visa Approve Rate Exploring ================================\")\n print (\"\")\n print (\" How do you want to explore the H1b Data? 
\")\n print (\" <a> : Overview \t\t \")\n print (\" <b> : Location \")\n print (\" <c> : Industry \")\n print (\" <d> : Company \") \n print (\" You can always input 'quit' to leave the system \")\n print (\"=================================================================================================\")\n\n key = option_input()\n if key == 'a':\n overview(data)\n if key == 'b':\n location(data)\n if key == 'c':\n industry_exploring(merged_data)\n if key == 'd':\n company_exploring(merged_data)\n except wrong_option_exception:\n print (\"Invalid option, please reselect.\")", "def load_data(filename):\n evidenceList = []\n labelList = []\n \n with open(filename) as csv_file:\n csvReader = csv.reader(csv_file, delimiter=',')\n #lineCount = 0\n #first save the fields:\n numRow = 0\n next(csvReader)\n for row in csvReader:\n if(row[17] == \"FALSE\"):\n labelList.append(0)\n else: labelList.append(1)\n #labelList.append(row[17])\n evidence = [None] * 17\n evidenceList.append(evidence)\n for count in range(17):\n if count == 0 or count == 2 or count == 4 or (count >= 11 and count <= 14):\n evidenceList[numRow][count] = int(row[count])\n elif count == 10:\n if row[count] == \"Jan\":\n evidenceList[numRow][count] = 0\n elif row[count] == \"Feb\":\n evidenceList[numRow][count] = 1\n elif row[count] == \"Mar\":\n evidenceList[numRow][count] = 2\n elif row[count] == \"Apr\":\n evidenceList[numRow][count] = 3\n elif row[count] == \"May\":\n evidenceList[numRow][count] = 4\n elif row[count] == \"Jun\":\n evidenceList[numRow][count] = 5\n elif row[count] == \"Jul\":\n evidenceList[numRow][count] = 6\n elif row[count] == \"Aug\":\n evidenceList[numRow][count] = 7\n elif row[count] == \"Sep\":\n evidenceList[numRow][count] = 8\n elif row[count] == \"Oct\":\n evidenceList[numRow][count] = 9\n elif row[count] == \"Nov\":\n evidenceList[numRow][count] = 10\n elif row[count] == \"Dec\":\n evidenceList[numRow][count] = 11\n elif count == 15:\n if row[count] == \"Returning_Visitor\":\n evidenceList[numRow][count] = 1\n else:\n evidenceList[numRow][count] = 0\n elif count == 16:\n if row[count] == \"TRUE\":\n evidenceList[numRow][count] = 1\n else:\n evidenceList[numRow][count] = 0\n else:\n evidenceList[numRow][count] = float(row[count])\n numRow += 1\n print(evidence)\n \n print(labelList) \n print(len(labelList))\n print(len(evidenceList))\n returnTuple = (evidenceList, labelList)\n\n return returnTuple\n #raise NotImplementedError", "def book_printer(book_type):\n\n # books.csv = [title,author,year,ID,book_type]\n with open('books.csv', 'r') as book_base:\n book_list = csv.reader(book_base)\n next(book_list)\n\n for book_data in book_list:\n if book_data[-1] == book_type:\n print(book_data)\n ID = book_data[-2]\n if_rented(ID)\n print('\\n')\n return", "def print_categories():\n u_item_dict = {}\n with open('service_now_ticket_sample.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n if row['u_category_gear'] not in u_item_dict:\n u_item_dict[row['u_category_gear']] = 1\n elif row[\"U_Category Match Found in GEAR?\"] == \"Yes\":\n u_item_dict[row['u_category_gear']] = u_item_dict[row['u_category_gear']] + 1\n print(sorted(u_item_dict.items(), key=operator.itemgetter(1)))", "def books_table_reader(books_table):\n\n # books.csv = [title, author, year, ID, book_type]\n with open('books.csv', 'r') as book_base:\n book_reader = csv.reader(book_base)\n next(book_reader)\n for line in book_reader:\n for box in books_table:\n if line[3] == box[0]:\n print(line)\n print(\"\\tRented 
on\",box[1],\"\\nTo be returned on\",box[2])", "def main(args):\n input_file = args[1]\n output_occupations = args[2]\n output_states = args[3]\n\n print(\"Analyzing input file:\")\n summary = process_data.Summary(input_file)\n print(\"Reading input data\")\n summary.read_file()\n\n print(\"Computing summaries\")\n occupations = summary.get_results(input_format.Concept.SOC_NAME)\n states = summary.get_results(input_format.Concept.WORK_STATE)\n\n print(\"Writing results\")\n occupations.to_file(output_occupations)\n states.to_file(output_states)", "def load_data(filename):\n\n # set one list for all evidence lists, and another for all label values\n evidence = []\n labels = []\n\n # get each row from csv file into a *list of rows*\n with open(filename) as f:\n users = csv.reader(f, delimiter=',')\n\n # dictionaries to translate csv values to numerical values\n months = {\"Jan\": 0, \"Feb\": 1, \"Mar\": 2, \"Apr\": 3, \"May\": 4, \"June\": 5, \"Jul\": 6,\n \"Aug\": 7, \"Sep\": 8, \"Oct\": 9, \"Nov\": 10, \"Dec\": 11}\n user_type = {\"New_Visitor\": 0, \"Returning_Visitor\": 1, \"Other\": random.choice([0, 1])}\n weekend_revenue = {\"TRUE\": 1, \"FALSE\": 0}\n\n # iterate through each row\n first_row = True\n for user in users:\n # skip header row\n if first_row:\n first_row = False\n continue\n\n local_evidence = []\n\n # iterate through each evidence value and append it to local_evidence\n for i in range(len(user) - 1):\n # change val to numerical before appending if needed\n if i == 10:\n local_evidence.append(months[user[i]])\n elif i == 15:\n local_evidence.append(user_type[user[i]])\n elif i == 16:\n local_evidence.append(weekend_revenue[user[i]])\n\n # change val type before appending if needed\n elif i == 0 or i == 2 or i == 4 or i == 11 or i == 12 or i == 13 or i == 14:\n local_evidence.append(int(user[i]))\n elif i == 1 or i == 3 or i == 5 or i == 6 or i == 7 or i == 8 or i == 9:\n local_evidence.append(float(user[i]))\n\n # append the complete list of evidence for the current user\n evidence.append(local_evidence)\n\n # append appropriate int value to labels for current user's label\n labels.append(weekend_revenue[user[-1]])\n\n return evidence, labels\n\n # raise NotImplementedError", "def proceed(results_sort):\n result_index = 0\n good = True\n with open('work_log.csv', 'r') as file:\n while good:\n proceed_prompt = input('Would you like to see the next '\n 'match (Y/N)? 
\\n>').upper()\n if proceed_prompt == 'Y':\n result_index += 1\n try:\n display_search_results(results_sort[result_index])\n except IndexError:\n print('There are no other entries that match your '\n 'search criteria.')\n clear()\n good = False\n elif proceed_prompt == 'N':\n good = False\n clear()", "def __init__(self, updated=False):\n self.deputies = []\n self.senators = []\n\n if updated:\n file = IDENTITY_FILE_UPDATED\n else:\n file = IDENTITY_FILE\n\n with open(file, 'rt') as csvfile:\n spamreader = csv.DictReader(csvfile, delimiter=';')\n for row in spamreader:\n # Senators and deputies has different contracts\n # TODO: CREATE ANOTHER FEATURE TELLING SENATORS vs DEPUTIES\n try:\n if row.get('sen:CodigoParlamentar') or 'senador' in row.get('post', '').lower():\n self.add_senator(Senator(row))\n\n if row.get('cam:ideCadastro') or 'deputado' in row.get('post', '').lower():\n self.add_deputy(Deputy(row))\n except AttributeError:\n import code; code.interact(local=dict(globals(), **locals()))", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('Downloading data set from DC Open data')\n\n with open(input_filepath, 'r') as f:\n parking_violations = json.load(f)\n\n for fullname, csv in parking_violations.items():\n download_file = csv + '.csv'\n local_filename = '_'.join(name.lower() for name in fullname.split() ) + '.csv'\n local_filename = os.path.join(output_filepath, local_filename)\n if not os.path.isfile(local_filename):\n time.sleep(5)\n r = requests.get(download_file)\n if not b'\"status\":\"Processing\",\"generating\":{}' in r.content:\n with open(local_filename, 'wb') as f:\n f.write(r.content)\n logger.info(local_filename)\n else:\n logger.warning('Cannot download {0}'.format(local_filename))", "def import_observations(self):\n\n fn = QFileDialog(self).getOpenFileName(self, \"Choose a eMOC project file\", \"\",\n \"Project files (*.eMOC);;All files (*)\")\n fileName = fn[0] if type(fn) is tuple else fn\n\n if self.projectFileName and fileName == self.projectFileName:\n QMessageBox.critical(None, programName,\n \"This project is already open\", QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n return\n\n if fileName:\n try:\n fromProject = json.loads(open(fileName, \"r\").read())\n except:\n QMessageBox.critical(self, programName, \"This project file seems corrupted\")\n return\n\n # transform time to decimal\n fromProject = convert_time_to_decimal(fromProject) # function in utilities.py\n\n dbc = dialog.ChooseObservationsToImport(\"Choose the observations to import:\",\n sorted(list(fromProject[OBSERVATIONS].keys())))\n\n if dbc.exec_():\n\n selected_observations = dbc.get_selected_observations()\n if selected_observations:\n flagImported = False\n\n # set of behaviors in current projet ethogram\n behav_set = set([self.pj[ETHOGRAM][idx][\"code\"] for idx in self.pj[ETHOGRAM]])\n\n # set of subjects in current projet\n subjects_set = set([self.pj[SUBJECTS][idx][\"name\"] for idx in self.pj[SUBJECTS]])\n\n for obsId in selected_observations:\n\n # check if behaviors are in current project ethogram\n new_behav_set = set(\n [event[EVENT_BEHAVIOR_FIELD_IDX] for event in fromProject[OBSERVATIONS][obsId][EVENTS]\n if event[EVENT_BEHAVIOR_FIELD_IDX] not in behav_set])\n if new_behav_set:\n diag_result = dialog.MessageDialog(programName,\n (\"Some coded behaviors in <b>{}</b> are\"\n \"not in the ethogram:<br><b>{}</b>\").format(obsId,\n \", \".join(\n new_behav_set)),\n [\"Interrupt import\", \"Skip observation\",\n \"Import 
observation\"])\n if diag_result == \"Interrupt import\":\n return\n if diag_result == \"Skip observation\":\n continue\n\n # check if subjects are in current project\n new_subject_set = set(\n [event[EVENT_SUBJECT_FIELD_IDX] for event in fromProject[OBSERVATIONS][obsId][EVENTS]\n if event[EVENT_SUBJECT_FIELD_IDX] not in subjects_set])\n if new_subject_set and new_subject_set != {\"\"}:\n diag_result = dialog.MessageDialog(programName,\n (\n \"Some coded subjects in <b>{}</b> are not defined in the project:<br>\"\n \"<b>{}</b>\").format(obsId,\n \", \".join(new_subject_set)),\n [\"Interrupt import\", \"Skip observation\",\n \"Import observation\"])\n\n if diag_result == \"Interrupt import\":\n return\n\n if diag_result == \"Skip observation\":\n continue\n\n if obsId in self.pj[OBSERVATIONS].keys():\n diag_result = dialog.MessageDialog(programName,\n (\"The observation <b>{}</b>\"\n \"already exists in the current project.<br>\").format(\n obsId),\n [\"Interrupt import\", \"Skip observation\",\n \"Rename observation\"])\n if diag_result == \"Interrupt import\":\n return\n\n if diag_result == \"Rename observation\":\n self.pj[OBSERVATIONS][\"{} (imported at {})\".format(obsId,\n datetime_iso8601()\n )] = dict(\n fromProject[OBSERVATIONS][obsId])\n flagImported = True\n else:\n self.pj[OBSERVATIONS][obsId] = dict(fromProject[OBSERVATIONS][obsId])\n flagImported = True\n\n if flagImported:\n QMessageBox.information(self, programName, \"Observations imported successfully\")", "def main():\n # Parse the required args for processing\n parser = argparse.ArgumentParser(description='This is a direct report calculator made by Dean Hutton')\n parser.add_argument('-i', '--input', help='Input file name used to run direct reports on.', required=True)\n parser.add_argument('-rd', '--run_date', help='The date to display direct reports for.', required=True)\n args = parser.parse_args()\n\n input_file_location = args.input\n\n # Do error checking making sure run_date is valid date and that input file exists\n if not os.path.isfile(input_file_location):\n print('There has been an error locating the input file. Please make sure this file exists {}'.format(args.input))\n sys.exit()\n\n try:\n run_date = datetime.strptime(args.run_date, '%Y-%m-%d')\n except ValueError as e:\n print(\"There has been an error parsing the run date. 
Please correct this date '{0}' \"\n \"so that it follows follows the '2011-03-24' date format.\".format(args.run_date))\n sys.exit()\n\n all_employee_dict, supervisor_employee_dict = etl_csv_file(input_file_location)\n\n # Check to see if there was an error parsing the CSV file and if so print it and exit\n if not all_employee_dict:\n print supervisor_employee_dict\n sys.exit()\n\n supervisor_milestone_list, all_employee_dict = generate_milestone_data(\n supervisor_employee_dict,\n all_employee_dict,\n run_date\n )\n non_supervisor_list = []\n\n # Create placeholders for all employees that are not supervisors so they can be printed\n for non_supervisor_id in all_employee_dict:\n non_sv_dict = {}\n non_sv_dict['supervisor_id'] = non_supervisor_id\n non_sv_dict['upcoming_milestones'] = 'No direct reports'\n non_supervisor_list.append(non_sv_dict)\n\n # Combine supervisors with non-supervisors for printing\n final_output_list = supervisor_milestone_list + non_supervisor_list\n\n # # Print out the results\n print ('Plain Text')\n pprint.pprint(final_output_list)", "def main():\n boarding_passes = get_boarding_passes(\"./data_5.dat\")\n seat_ids = get_seat_ids(boarding_passes)\n print(get_highest_seat_id(seat_ids))\n print(get_missing_seat_id(seat_ids))", "def main():\n args = parse_args()\n if check_args(args):\n read_descriptions(args)\n generate_deletes(args)", "def main():\n\n args = docopt.docopt(__doc__, version='0.0.1')\n\n # Initialize expyriment & wait its message to show\n initialize.init_arguments(args)\n exp = initialize.init_expyriment(args)\n\n # Useful shortcuts throughout the file\n kb = expyriment.io.Keyboard()\n\n # If we need to calibrate, then do so and terminate.\n if args[\"calibrate\"]:\n calibration(exp, args)\n expyriment.control.end('Merci !', 2000)\n return 0\n\n # Hash table for fast retrieval when presenting: reading from disk is slow!\n hash_table = dict()\n\n # Now let's read the csv file line by line and populate the events.\n # PriorityQueue sort on insertion based on the first element of the\n # inserted tuple: this means your csv file can have random order, or that\n # you can take input from several csv files\n events = queue.PriorityQueue()\n for csv_file in args[\"<file>\"]:\n # Save the path to the CSV file\n exp.add_experiment_info(csv_file)\n\n # Create the path to the stimuli\n bp = args[\"--stim-dir\"]\n\n # Open the csv file and read its rows.\n # ATTENTION : Encoding is platform dependant. 
See the open() manual\n for row in csv.reader(open(csv_file), delimiter='\\t'):\n # Destruct a row into its parts, they will be of type str\n onset, stype, f, *meta = row\n\n # If this is the first encounter of this stimuli then preload it\n if (stype, f) not in hash_table:\n hash_table[stype, f] = load_stimuli(stype, f, bp, args)\n hash_table[stype, f].preload()\n\n # Then push relevant events based on the type\n events.put((int(onset), stype, f, (stype, f), meta))\n\n expyriment.control.start(skip_ready_screen=True,\n subject_id=args[\"--subject-id\"])\n\n good = expyriment.stimuli.Audio(bp + \"/correct.wav\")\n bad = expyriment.stimuli.Audio(bp + \"/incorrect.wav\")\n good.preload()\n bad.preload()\n\n show_text(\"Waiting for scanner trigger\", args).present()\n kb.wait_char('t')\n\n # Start the experiment clock and loop through the events\n clock = expyriment.misc.Clock()\n last_right_pos = -1\n has_played = False\n while not events.empty():\n onset, stype, id, (stype, f), *meta = events.get()\n\n # If it's still too early, then wait for the onset but log keypresses\n while clock.time < (onset - 1):\n k = kb.check()\n if k is not None:\n exp.data.add([clock.time, \"keypressed\", k])\n if (not has_played) and (stype == \"oddity\" or stype == \"oddity-faces\"):\n has_played = True\n if k == 114:\n if last_right_pos in [0, 1, 5]:\n good.present()\n elif last_right_pos in [2, 3, 4]:\n bad.present()\n elif k == 108:\n if last_right_pos in [2, 3, 4]:\n good.present()\n elif last_right_pos in [0, 1, 5]:\n bad.present()\n\n # When time has come, present the stimuli and log that you just did so\n reported_time = hash_table[stype, f].present()\n if (stype == \"oddity\" or stype == \"oddity-faces\"):\n last_right_pos = int(meta[0][0])\n has_played = False\n exp.data.add(list([clock.time, stype, id, onset, reported_time] + meta[0]))\n\n # Now the experiment is done, terminate the exp\n expyriment.control.end('Merci !', 2000)\n return 0", "def main():\n filename1=input(\"Enter the first filename :\")\n filename2 = input(\"Enter the second filename :\")\n city1, hilltownDict=readFile(filename1)\n city2, valleydaleDict=readFile(filename2)\n max=int (input(\"Enter the maximum number of items :\"))\n\n hilltownDict, valleydaleDict=profitcal(hilltownDict,valleydaleDict)\n hilltownList=convertToList(hilltownDict)\n valleydaleList=convertToList(valleydaleDict)\n\n sort_valley= selectionSort(valleydaleList)\n sort_hill = selectionSort(hilltownList)\n\n\n resultH,tpH=totalProfit(city1,sort_hill,max)\n resultV,tpV=totalProfit(city2,sort_valley,max)\n\n if tpH>tpV:\n print(resultH)\n\n elif tpV>tpH:\n print(resultV)\n\n else:\n print(\"Both city has same profit\" +\"\\n\" + resultH +\"\\n\" + resultV)", "def main():\n data = []\n\n menu_text = \"\"\"\n 1. Display all records\n 2. Add new record\n 3. Edit existing record\n 4. Delete record \n 5. 
Quit\n \"\"\"\n\n while True:\n print(menu_text)\n choice = input('Enter your choice: ')\n if choice == '1':\n display_all_records()\n elif choice == '2':\n add_new_record(data)\n elif choice == '3':\n results = find_player()\n\n if results:\n edit_existing_record(results)\n else:\n print(f'Player is not on our records\\n')\n elif choice == '4':\n delete_record()\n elif choice == '5':\n break\n else:\n print('Not a valid selection, please try again')", "def load_data(filename):\n \n labels = []\n evidence = []\n\n monthdict = {\n \"Jan\": 0, \"Feb\": 1, \"Mar\": 2, \"Apr\": 3, \"May\": 4, \"June\": 5, \"Jul\": 6,\n \"Aug\": 7, \"Sep\": 8, \"Oct\": 9, \"Nov\": 10, \"Dec\": 11\n }\n\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n\n for row in reader:\n evidence.append(\n [int(row[0]), float(row[1]), int(row[2]), float(row[3]), int(row[4]), float(row[5])] +\n [float(e) for e in row[6:9]] + [monthdict[row[10]]] +\n [int(e) for e in row[11:14]] + [0 if row[15] == \"New_Visitor\" else 1] +\n [1 if row[16] == \"TRUE\" else 0]\n )\n\n labels.append(0 if row[17] == \"FALSE\" else 1)\n \n return (evidence, labels)", "def main():\n\n print_header()\n statistic = {}\n\n\n while True:\n\n \"\"\"\n System take input for opponent like friend or computer (computer is segregated into two types 'c1' (EasyAi) and 'c2' HarderAi)\n System also take input for player icon and provide only two options 'X' or 'O'\n \"\"\"\n\n opponent = input(\n \"Would you like to play against a friend or the computer? \\n\\t-friend (f)\\n\\t-computer level 1 (c1)\\n\\t-computer level 2 (c2)\")\n icon_coice = input(\"Would you like to play as (X) or (O)? \").upper()\n players = [EasyAi(icon_coice), HarderAi(flip_icon(icon_coice))]\n if opponent.lower() == \"f\":\n players = [Human(icon_coice), Human(flip_icon(icon_coice))]\n # start a game with friend\n if opponent.lower() == \"c1\":\n players = [Human(icon_coice), EasyAi(flip_icon(icon_coice))]\n # start a game with computer\n if opponent.lower() == \"c2\":\n players = [Human(icon_coice), HarderAi(flip_icon(icon_coice))]\n\n start_time = time.time()\n\n \"\"\"\n Load the Game by creating game class object and it takes the Players list\n call its play_game method to start game and return final results\n \"\"\"\n\n game = Game(players=players)\n result = game.play_game()\n ending_time = time.time()\n\n statistic[result] = statistic.get(result, 0) + 1\n\n # calculate game duration\n duration = int(ending_time - start_time)\n duration_string = get_duration_string(duration)\n\n # pass the Game states and it duration to below method\n write_result_to_file(duration_string, statistic)\n\n user_choice = input(\"Would you like to play a game again? 
[y/n]\")\n if user_choice.lower().startswith(\"n\"):\n break", "def if_rented(ID):\n\n # rented.csv = [ID,rental_date,return_date,RETURNED,login]\n with open('rented.csv','r') as rented_base:\n rented_reader = csv.DictReader(rented_base)\n for rented_data in rented_reader:\n if rented_data['ID'] == ID:\n if rented_data['RETURNED'] == 'TRUE':\n print(\"Book is available!\")\n else:\n print(\"\\tBook is rented and should be back on\",\n rented_data['return_date'],\"\\n\"\n )", "def main():\n\n # Accept up to three command-line arguments\n input_terms = \"<input_GO_terms_file>\"\n input_annotations = \"<input_gene_associations_file>\"\n output_filename = \"<output_filename>\"\n\n\n # The first two arguments are required GO terms file ending with .obo\n # and gene association GAF file ending with .gaf\n if len(sys.argv) < 3:\n sys.exit(\"Please provide required GO terms .obo file and gene \" +\n \"assocatiion .gaf file.\")\n elif not sys.argv[1].endswith(\".obo\"):\n sys.exit(\"Please provide a GO terms .obo file.\")\n elif not sys.argv[2].endswith(\".gaf\"):\n sys.exit(\"Please provide a gene association .gaf file.\")\n else:\n input_terms = sys.argv[1]\n input_annotations = sys.argv[2]\n\n\n # Check if the provided import .obo or .gaf files exist\n if not input_terms:\n sys.exit(input_terms + \" not found. Check the file path and try again.\")\n elif not input_annotations:\n sys.exit(input_annotations + \" not found. Check the file path and try again.\")\n elif len(sys.argv) == 3:\n output_filename = \"results.tsv\"\n sys.stdout = open(\"results.tsv\", \"w\")\n elif len(sys.argv) == 4:\n output_filename = sys.argv[3] + \".tsv\"\n sys.stdout = open(output_filename, \"w\")\n\n\n # parse id and is_valeus and make a go_dict\n split_input_terms = split_terms(input_terms)\n go_dict = {}\n for record in split_input_terms:\n (go_id, is_a) = parse_go_term(record)\n key_go_dict = \"\".join(go_id)\n go_dict[key_go_dict] = is_a\n\n\n # Export an annotation gene information to tsv format into the output file\n gene_association_map = map_protein_to_go(input_annotations)\n for protein, go_ids in sorted(gene_association_map.items()):\n print(protein, end=\"\")\n\n for go_id in sorted(go_ids):\n parent_go_ids = find_parent_terms(go_id, go_dict)\n\n count = 0\n for parent_go_id in sorted(parent_go_ids):\n\n if count == 0:\n print(\"\\t\", go_id, \"\\t\", parent_go_id)\n count += 1\n else:\n print(\"\\t\", parent_go_id, sep=\"\\t\")\n\n sys.stdout.close()", "def compare_EUM_DWD():\n the_dir = \"/home/mobaxterm/DWD-Data/\" \n eum_fd = open(\"%s/result-by-headers.csv\" %(the_dir))\n dwd_fd = open(\"%s/DCPC_Eumetsat.txt\" % (the_dir))\n\n set_dwd = set()\n set_eum = set()\n\n eum_bull_info = {}\n dwd_bull_info = {}\n \n #read DWD Bulletins\n for line in dwd_fd.readlines():\n elems = line.split(\",\")\n #print(\"bulletin %s origin %s\" % (elems[6], elems[7]))\n #set_dwd.add(\"%s:%s\" % (elems[6][1:-1],elems[7][1:-1]))\n #print(\"%s:%s\\n\" % (elems[6][1:-1],elems[7][1:-1]))\n bull_id = \"%s\" % (elems[6][1:-1]) \n \n set_dwd.add(bull_id)\n dwd_bull_info[bull_id] = elems[11] \n\n #read EUM Bulletins\n for line in eum_fd.readlines():\n elems = line.split(\"/\")\n #print(\"bulletins = [%s]\" % (elems[0].strip()))\n bull_id = elems[0].strip().split(\":\")\n\n if len(bull_id[0]) < 6:\n print(\"Warning bull_id is invalid %s. 
Ignore it !!!\\n\" % (elems))\n else:\n #print(\"bull_id = %s\" % (bull_id))\n #set_eum.add(elems[0].strip())\n set_eum.add(bull_id[0])\n eum_bull_info[bull_id[0]] = elems[1]\n\n in_dwd_not_in_eum = set_dwd.difference(set_eum)\n in_eum_not_in_dwd = set_eum.difference(set_dwd)\n\n print(\"total in dwd set %d, nb elem in dwd and not in eum: %d\" % (len(set_dwd), len(in_dwd_not_in_eum)))\n print(\"total in eum set %d, nb elem in eum and not in dwd: %d\" % (len(set_eum), len(in_eum_not_in_dwd)))\n\n print(\"\\nBulletin IDs in EUMETSAT Bulletin outputs and not on the DWD list\")\n\n print(\"| bullID | filename\")\n print(\"|----------------------------------------\")\n\n # list of elements to ignore\n #treated_elements = [ 'IEDX81', 'IEDX82', 'IEOX11', 'IEOX12', 'IEOX13', 'IEOX14' ]\n treated_elements = []\n\n for b_id in sorted(in_eum_not_in_dwd):\n if len(b_id) >= 6 and b_id not in treated_elements:\n print(\"| %s | %s\" % (b_id, eum_bull_info[b_id].strip())) \n\n print(\"-------------------------------------------------------------------------------------\")\n print(\"-------------------------------------------------------------------------------------\")\n\n #Sort out the DWD list\n\n print(\"Bulletin IDs in DWD Bulletin outputs and not on the EUMETSAT list\")\n\n print(\"| bullID | filename\")\n print(\"|----------------------------------------\")\n\n cpt = 0\n\n ignored = [\"SMAA\", \"UEAA\", \"UKAA\", \"ULAA\", \"USAA\", \"CSAA\", \\\n \"IRRA\", \"IRRD\", \"IRVA\", \"IRVD\", \"IUCA\", \"IUCD\", \"IUCE\", \"IUCH\", \"IUCI\", \"IUCL\", \"IUCN\", \"IUCS\", \\\n \"IUFA\", \"IUFD\", \"IUFE\", \"IUFH\", \"IUFI\", \"IUFL\", \"IUHA\", \"IUHD\", \"IUHE\", \"IUHH\", \"IUHI\", \"IUHL\", \"IURA\", \"IURD\", \"IURE\", \"IURH\", \"IURI\", \\\n \"IURL\", \"IUVA\", \"IUVD\", \"IUVE\", \"IUVH\", \"IUVI\", \"IUVL\", \\\n \"TNAA\", \"TNCA\", \"TNDA\", \"TNIA\", \"TNKA\", \"TNLA\", \"TSAA\", \"TSCA\", \"TSDA\", \"TSIA\", \"TSKA\", \"TSLA\", \"TTAA\", \\\n \"TTAA\", \"TTCA\", \"TTDA\", \"TTIA\", \"TTKA\", \"TTLA\", \\\n \"TWAA\", \"TWCA\", \"TWDA\", \"TWIA\", \"TWKA\", \"TWLA\" ] #ignored extra bullid num\n\n full_bull_ignored_list = [ \"IXRN81\", \"SISC20\", \"SMSC01\", \"SMVX21\"]\n\n for b_id in sorted(in_dwd_not_in_eum):\n # discard all continuations (T2 = E and ii > 1)\n if (len(b_id) >= 6 and b_id[1] == \"E\" and int(b_id[4:6]) > 1) or (\"CONTINUATION\" in dwd_bull_info[b_id]) or (\"METEOSAT 6\" in dwd_bull_info[b_id]) or (\"METEOSAT 5\" in dwd_bull_info[b_id]) or (\"METEOSAT 7 (00 DEGREES)\" in dwd_bull_info[b_id]) or (\"8 PARALLEL OPS\" in dwd_bull_info[b_id]):\n # discard continuation\n #something to be done\n continue\n else:\n if (b_id[0:4] not in ignored) and b_id not in full_bull_ignored_list:\n #print(\"| %s | %s\" % (b_id, dwd_bull_info[b_id].strip()))\n cpt += 1\n\n print(\"total meanigful bull_id in DWD list that are not disseminated by EUM = %d\" % (cpt))\n\n print(\"in_eum_not_in_dwd = %s\" % (in_eum_not_in_dwd))", "def file_from_user():\n try:\n file = get_input(\"Please enter name of the file to get data from. For exit press 0: \")\n if file == \"0\":\n sys.exit()\n LocationList.add_location(read_from_csv(file))\n except FileNotFoundError:\n print(\"\\nThis file wasn't found. 
Try again or press 0 to exit.\\n\")\n file_from_user()", "def main() -> None:\r\n\r\n with open('main/NLP/LDA/IHE_RESULTS/scopus_prediction_results.json', 'r') as f:\r\n results = json.load(f)\r\n\r\n lst = {}\r\n cols = []\r\n for i in range(20):\r\n lst[str(i)] = []\r\n cols.append(str(i))\r\n\r\n for i in range(20):\r\n for doi, vals in results.items():\r\n if vals[str(i)] >= THRESHOLD:\r\n lst[str(i)].append(doi)\r\n\r\n generate_csv(lst, cols, \"main/NLP/LDA/IHE_RESULTS/pub_analyse_20.csv\")", "def main() -> None:\n\n args = get_args()\n\n if not os.path.isdir(args.outdir):\n os.makedirs(args.outdir)\n\n print('Starting export... (--verbose for updates)')\n\n variables = set()\n measurements_file = os.path.join(args.outdir, 'scrutinizer.csv')\n with open(measurements_file, 'wt') as measurements_fh:\n writer = csv.DictWriter(measurements_fh,\n fieldnames=[\n 'source', 'unit', 'variable_name',\n 'location_name', 'location_type', 'value',\n 'collected_on', 'medium', 'variable_desc'\n ],\n quoting=csv.QUOTE_NONNUMERIC)\n writer.writeheader()\n\n for i, m in enumerate(Measurement, start=1):\n if args.verbose:\n print(f'{i:6}: {m.variable.variable} {m.value}')\n\n writer.writerow({\n 'source': m.variable.source.source,\n 'unit': m.variable.unit,\n 'variable_name': m.variable.variable,\n 'variable_desc': m.variable.description,\n 'location_name': str(m.location.location_name),\n 'location_type': m.location.location_type.location_type,\n 'value': m.value,\n 'collected_on': m.collected_on,\n 'medium': m.medium.medium,\n })\n\n variables.add((m.variable.variable, m.variable.description))\n\n variables_file = os.path.join(args.outdir, 'variables.csv')\n with open(variables_file, 'wt') as variables_fh:\n writer = csv.DictWriter(variables_fh, fieldnames=['name', 'desc'])\n writer.writeheader()\n for key, val in dict(variables).items():\n writer.writerow({'name': key, 'desc': val})\n\n print(f'Done, see outdir \"{args.outdir}\".')", "def main():\n scores_file = open(\"scores.csv\")\n scores_data = scores_file.readlines()\n print(scores_data)\n subjects = scores_data[0].strip().split(\",\")\n score_values = []\n for score_line in scores_data[1:]:\n score_strings = score_line.strip().split(\",\")\n score_numbers = [int(value) for value in score_strings]\n score_values.append(score_numbers)\n scores_file.close()\n scores_by_subjects = reorganise_score(score_values)\n subject_details(scores_by_subjects, subjects)", "def incorrect_payment_list(path): #create a function with a file/path as the parameter\n payment_list = open(path) #open the file\n\n for line in payment_list: #for loop iterates over file lines\n line = line.rstrip()\n content = line.split('|') #splits file lines by '|' to make a list of strings\n\n unknown_number = content[0] #not sure what the first number in the file means. 
Maybe customer number?\n customer_name = content[1] #assigned customer name to index 1 of the list\n expected_payment = float(content[2]) #assigned the expected payment to index 2 of the list\n actual_payment = float(content[3]) #assigned the actual payment to index 3 of the list\n\n if expected_payment != actual_payment: #if statment says if the customer did not pay \n #the expected amount, then the program should print the statement specified.\n print(\" {} paid ${:.2f}, but should have paid ${:.2f}\".format( \n customer_name, actual_payment, expected_payment) )\n \n payment_list.close() #close the file ", "def main():\n\n # Load arguments\n args = get_args()\n \n assert os.path.exists(args.csv), ' [ERR] File' + os.path.exists(args.csv) +'does not exist'\n\n print(args)\n try:\n dir_name = os.path.dirname(args.json)\n os.mkdir(dir_name)\n print(' [INFO] Creating', dir_name, 'directory')\n except:\n print(' [INFO] Directory', dir_name, 'already exists. Data will be replaced')\n pass\n\n if args.config:\n assert os.path.exists(args.config), ' [ERR] File' + os.path.exists(args.config) +'does not exist'\n dic_types = read_config(args.config)\n else:\n dic_types = {}\n \n # Create json\n create_json_from_csv(args.csv, args.delimiter, args.cols_delimiter, args.keep, dic_types, args.infer_types, args.max_docs, args.json, args.per_line)\n\n return 0", "def load_data(filename):\n evidence = []\n labels = []\n with open(filename) as csvfile:\n file_rows = csv.reader(csvfile)\n next(file_rows)\n for row in file_rows:\n values = []\n\n # - Administrative, an integer\n values.append(int(row.pop(0)))\n # - Administrative_Duration, a floating point number\n values.append(float(row.pop(0)))\n # - Informational, an integer\n values.append(int(row.pop(0)))\n # - Informational_Duration, a floating point number\n values.append(float(row.pop(0)))\n # - ProductRelated, an integer\n values.append(int(row.pop(0)))\n # - ProductRelated_Duration, a floating point number\n values.append(float(row.pop(0)))\n # - BounceRates, a floating point number\n values.append(float(row.pop(0)))\n # - ExitRates, a floating point number\n values.append(float(row.pop(0)))\n # - PageValues, a floating point number\n values.append(float(row.pop(0)))\n # - SpecialDay, a floating point number\n values.append(float(row.pop(0)))\n # - Month, an index from 0 (January) to 11 (December)\n values.append(month_to_index(row.pop(0)))\n # - OperatingSystems, an integer\n values.append(int(row.pop(0)))\n # - Browser, an integer\n values.append(int(row.pop(0)))\n # - Region, an integer\n values.append(int(row.pop(0)))\n # - TrafficType, an integer\n values.append(int(row.pop(0)))\n # - VisitorType, an integer 0 (not returning) or 1 (returning)\n visitor_type = row.pop(0)\n if visitor_type == \"Returning_Visitor\":\n values.append(1)\n else:\n values.append(0)\n # - Weekend, an integer 0 (if false) or 1 (if true)label = row.pop(0)\n weekend = row.pop(0)\n if weekend == \"TRUE\":\n values.append(1)\n else:\n values.append(0)\n\n evidence.append(values)\n\n label = row.pop(0)\n if label == \"TRUE\":\n labels.append(1)\n else:\n labels.append(0)\n\n return evidence, labels", "def main():\n\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: 
{}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')", "def run(params, conn, outputfile):\n date_begin = parse(params['date_begin'] + ' 00:00:00 +0700')\n date_end = parse(params['date_end'] + ' 23:59:59 +0700')\n domain_id = params['domain_id']\n authority_ids = params['authority_ids']\n\n covid_report_type_id = fetch_report_type_id(conn, 'surveillance-covid-19', domain_id)\n main_data = fetch_data(conn, date_begin, date_end, authority_ids, domain_id, covid_report_type_id)\n\n covid_report_type_id = fetch_report_type_id(conn, 'surveillance-covid-19-followup', domain_id)\n follow_data = fetch_data(conn, date_begin, date_end, authority_ids, domain_id, covid_report_type_id)\n\n line_list = join(main_data, follow_data)\n tabular(line_list)\n\n if len(line_list) == 0:\n return False\n\n df = pandas.DataFrame(line_list)\n df['date'] = df['date'].dt.tz_convert(tz)\n df['date'] = df['date'].dt.strftime('%d/%m/%Y %H:%M')\n writer = pandas.ExcelWriter(outputfile)\n df.to_excel(writer, 'covid-19', columns=['report_id', 'name', 'gender', 'age',\n 'village_no', 'village', 'tumbols', 'amphurs',\n 'arrival_date_village', 'mobile_phone',\n 'risk_factor', 'symptom_check', 'symptom_covid',\n 'date', 'latitude', 'longitude',\n '01', '02', '03', '04', '05', '06',\n '07', '08', '09', '10', '11', '12', '13', '14'], index=False)\n ldf = pandas.DataFrame(flat(main_data))\n ldf['date'] = ldf['date'].dt.tz_convert(tz)\n ldf.sort_values(by=['date'], inplace=True)\n ldf['date'] = ldf['date'].dt.strftime('%d/%m/%Y %H:%M')\n\n def is_followup(row):\n return row['report_id'] != row['group_id']\n\n ldf['followup'] = ldf.apply(is_followup, axis=1)\n ldf.to_excel(writer,\n 'all',\n columns=['report_id', 'group_id', 'followup', 'name', 'gender', 'age',\n 'village_no', 'village', 'tumbols', 'amphurs',\n 'arrival_date_village', 'mobile_phone',\n 'risk_factor', 'symptom_check', 'symptom_covid',\n 'total_times', 'activity_other',\n 'date', 'latitude', 'longitude'],\n index=False)\n writer.save()\n return True", "def process_view_data(self):\r\n self.print_options(self.view_data_options,2)\r\n\r\n \"\"\"\r\n Asks for user input. Then redirects to the appropriate function.\r\n \"\"\"\r\n n = (input(\"What would you like to view? Please input the correpsonding integer:\"))\r\n\r\n if n == str(1):\r\n self.print_options(self.population.list_of_countries)\r\n self.process_view_data()\r\n elif n == str(2):\r\n self.print_options(self.population.columns)\r\n self.process_view_data()\r\n elif n == str(3):\r\n print(self.population)\r\n self.process_view_data()\r\n elif n == str(4):\r\n self.csv_output()\r\n file_directory = os.getcwd() + \"\\output.csv\"\r\n print(\"File output completed. 
Saved at %s\" %file_directory)\r\n self.process_view_data()\r\n elif n == 'q':\r\n quit()\r\n elif n == 'b':\r\n self.menu_page()\r\n else:\r\n raise InputError(\"Please input a valid digit, 'q' or 'b'\")", "def main_dummy():\n\t#read response json from file\n\twith open('json_dumps\\odds.json', 'r') as odds_file:\n\t\todds_json = json.load(odds_file)\n\t#print(odds_json)\n\n\tdata_processor = DataProcessor()\n\tdata_processor.filter_data(odds_json)\n\tdata_processor.sort_max_h2h_odds()\n\tdata_processor.check_for_h2h_odds_at_threshold(2.05)\n\t\n\tprint(\"dummy end\")\n\n\t# with open('data_test.csv', mode='w') as employee_file:\n\t# \temployee_writer = csv.writer(employee_file, dialect='excel')\n\t# \temployee_writer.writerow(data_processor.teams)\n\t# \temployee_writer.writerow(data_processor.h2h_odds)\n\t# \temployee_writer.writerow(data_processor.betting_sites)", "def open_actuarial_data(sex, years):\n filename = \"adj_act_data_2014.txt\" #file generated using get_actdata_2014.py\n\n all_data = []\n\n with open(filename) as f:\n text = csv.reader(f) \n for line in text:\n all_data.append(line)\n\n #I discovered the csv reader after I had already\n #written the get_singlename_year() function\n \n alive_prob = []\n shift = min(years) - 1880\n for index, item in enumerate(years):\n index +=shift\n if sex == \"M\":\n #Male alive prob. data is stored in column 4 of actuary file\n alive_prob.append(float(all_data[index][4])) \n elif sex == \"F\":\n #Female alive prob. data is stored in column 8 of actuary file\n alive_prob.append(float(all_data[index][8]))\n else:\n print \"Neither F or M chosen\"\n return False\n\n\n return alive_prob #returns the probability of being alive in 2017", "def see_raw_data(city):\n\n while True:\n try:\n see_raw_data_input = input('\\nIn addition of the stats above, would you like to scroll through the raw data? (y/n)\\n')\n if see_raw_data_input not in ('y', 'n'):\n raise Exception ('Invalid answer')\n if see_raw_data_input == 'n':\n break\n if see_raw_data_input == 'y':\n with open (CITY_DATA[city], 'r') as f:\n reader = csv.reader(f)\n count_row_start_iteration = 0\n count_row_read = 0\n for row in reader:\n print(row)\n count_row_read += 1\n if count_row_read == count_row_start_iteration +6:\n continue_scroll = input('\\nDo you want to continue scrolling 5 more rows through the raw data? 
(y/n): ')\n if continue_scroll == 'n':\n break\n else:\n count_row_start_iteration +=5\n except Exception :\n print (\"Please answer 'y' or 'n'\\n\")", "def main(filepath):\n path = Path(filepath)\n\n if not path.is_file():\n click.echo('reading_detector filepath_csv')\n return sys.exit(1)\n try:\n fil = open(path)\n results = app.run_ml_detection(fil)\n headers = ['Client', 'Month', 'Suspicious', 'Median']\n output_result = [[result.client_id, result.month.strftime('%Y-%m'),\n result.suspicious_reading, result.median]\n for result in results]\n click.echo('{0}'.format(tabulate(output_result, headers=headers, tablefmt=\"github\")))\n except IOError as err:\n logging.error(err, exc_info=True)\n click.echo(\"Input file is incorrect\")\n return sys.exit(1)", "def main():\n args = parse_arguments()\n\n de_data = pd.read_csv(args.raw_file, sep=\"\\t\")\n de_data.rename(columns={\"Unnamed: 0\": \"gene_id\"}, inplace=True)\n de_data.fillna(value=1, inplace=True)\n columns = {}\n col_order = []\n\n # Make sure all listed numeric columns are valid numeric variables based\n # on a union of numeric column names from cuffdiff, edgeR, deseq2 and test\n # files.\n numeric_columns = [\n \"baseMean\",\n \"log2FoldChange\",\n \"lfcSE\",\n \"stat\",\n \"pvalue\",\n \"padj\",\n \"value_1\",\n \"value_2\",\n \"log2(fold_change)\",\n \"test_stat\",\n \"p_value\",\n \"q_value\",\n \"logfc\",\n \"fdr\",\n \"stat\",\n \"logFC\",\n \"logCPM\",\n \"LR\",\n \"Pvalue\",\n \"FDR\",\n ]\n de_columns = de_data.columns\n\n for column in numeric_columns:\n if column not in de_columns:\n continue\n\n if not is_numeric_dtype(de_data[column]):\n msg = (\n f\"Column {column} is not numeric. Please make sure \"\n f\"that the input file has valid numeric values (i.e. \"\n f\"periods for decimal places).\"\n )\n send_message(error(msg))\n raise ValueError(msg)\n\n if args.gene_id:\n if args.gene_id == \"index\":\n columns[\"gene_id\"] = list(de_data.index.astype(str))\n col_order.append(\"gene_id\")\n else:\n columns[\"gene_id\"] = list(de_data[args.gene_id].astype(str))\n col_order.append(\"gene_id\")\n\n if args.logfc:\n col = np.array(de_data[args.logfc])\n col[np.isinf(col)] = 0\n columns[\"logfc\"] = list(col)\n col_order.append(\"logfc\")\n\n if args.fdr:\n columns[\"fdr\"] = list(de_data[args.fdr])\n col_order.append(\"fdr\")\n\n if args.pvalue:\n columns[\"pvalue\"] = list(de_data[args.pvalue])\n col_order.append(\"pvalue\")\n\n if args.fwer:\n columns[\"fwer\"] = list(de_data[args.fwer])\n col_order.append(\"fwer\")\n\n if args.logodds:\n columns[\"logodds\"] = list(de_data[args.logodds])\n col_order.append(\"logodds\")\n\n if args.stat:\n columns[\"stat\"] = list(de_data[args.stat])\n col_order.append(\"stat\")\n\n with open(args.output_json, \"w\") as f:\n json.dump(columns, f, separators=(\",\", \":\"), allow_nan=False)\n\n outdf = pd.DataFrame(columns)\n outdf = outdf[col_order]\n outdf.to_csv(args.output_file, sep=\"\\t\", index=False, compression=\"gzip\")", "def main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and 
categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')", "def main():\n lines, filename = get_filename()\n album_dictionary = extract_all_albums(lines)\n album_dictionary = read_sales(lines, album_dictionary)\n print_table(album_dictionary, filename)", "def test_part1_code():\n\n # Simple test for reader\n test_table = read_csv_file(\"test_case.csv\") # create a small CSV for this test\n print_table(test_table)\n print()\n\n # Test the writer\n cancer_risk_table = read_csv_file(\"cancer_risk05_v4_county.csv\")\n write_csv_file(cancer_risk_table, \"cancer_risk05_v4_county_copy.csv\")\n cancer_risk_copy = read_csv_file(\"cancer_risk05_v4_county_copy.csv\")\n\n # Test whether two tables are the same\n for row in range(len(cancer_risk_table)):\n for col in range(len(cancer_risk_table[0])):\n if cancer_risk_table[row][col] != cancer_risk_copy[row][col]:\n print(\"Difference at\", row, col, cancer_risk_table[row][col], cancer_risk_copy[row][col])", "def main(raw_filepath, interim_filepath, processed_filepath):\n raw_filepath = Path(raw_filepath)\n interim_filepath = Path(interim_filepath)\n processed_filepath = Path(processed_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n years = ['2010', '2011', '2012', '2013', '2014']\n\n #############################################################\n ################ Life Expectancy Outcome ####################\n #############################################################\n\n le_birth = pd.read_csv(raw_filepath / 'US_A.csv',\n usecols=['Tract ID', 'e(0)'],\n dtype={'Tract ID': \"object\"}) \\\n .rename(columns={'Tract ID': 't10_cen_uid_u_2010'}) \\\n .set_index('t10_cen_uid_u_2010')\n\n le_other = pd.read_csv(raw_filepath / 'US_B.csv',\n usecols=['Tract ID', 'Age Group', 'e(x)'],\n dtype={'Tract ID': \"object\"}) \\\n .rename(columns={'Tract ID': 't10_cen_uid_u_2010'}) \\\n .set_index(['t10_cen_uid_u_2010', 'Age Group']) \\\n .sort_index() \\\n .loc[(slice(None), ['15-24', '35-44', '55-64']), :] \\\n .unstack() \\\n .reindex(le_birth.index) # use the same tracts for all experiments\n\n le_other.columns = ['e(20)', 'e(40)', 'e(60)']\n\n # le_birth.to_csv(processed_filepath / 'y_00.csv', header=True)\n # le_other['e(20)'].to_csv(processed_filepath / 'y_20.csv', header=True)\n # le_other['e(40)'].to_csv(processed_filepath / 'y_40.csv', header=True)\n # le_other['e(60)'].to_csv(processed_filepath / 'y_60.csv', header=True)\n\n\n ##############################################################\n ################## Priority Dataset ##########################\n ##############################################################\n\n with open(raw_filepath / 'T10_Priority_Wide_Interpolated.csv', 'r') as f:\n cols = f.readline().strip().split(',')\n\n proj_cols = [x for x in cols if x[-4:] in years]# and\n # get all the priority NETS columns for later\n net_cols = ['t10_cen_uid_u_2010'] + [x[:11] + '_d_' + x[14:] for x in cols if '_net_' in x]\n\n data_X = pd.read_csv(raw_filepath / 'T10_Priority_Wide_Interpolated.csv', usecols=proj_cols,\n dtype={'t10_cen_uid_u_2010': \"object\"}) \\\n .set_index('t10_cen_uid_u_2010')\n\n # Create % younger than 25 (this method is far less than ideal)\n ag25up = data_X.filter(regex='.*(_pop_c_|ag25up).*')\n ag25up_coltuples = [(x[:-4], 
x[-4:]) for x in ag25up.columns]\n ag25up.columns = pd.MultiIndex.from_tuples(ag25up_coltuples)\n ag25up_long = ag25up.stack()\n ag25dwn_p = ((ag25up_long['t10_ldb_pop_c_'] - ag25up_long['t10_ldb_ag25up_c_'])\n / ag25up_long['t10_ldb_pop_c_']).unstack()\n ag25dwn_p.columns = ['t10_ldb_ag25dwn_p_' + x for x in ag25dwn_p.columns]\n\n # Create % older than 65\n ag65up = data_X.filter(regex='.*(_pop_c_|a60up).*')\n ag65up_coltuples = [(x[:-4], x[-4:]) for x in ag65up.columns]\n ag65up.columns = pd.MultiIndex.from_tuples(ag65up_coltuples)\n ag65up_long = ag65up.stack()\n ag65up_p = (ag65up_long['t10_ldb_a60up_c_'] / ag65up_long['t10_ldb_pop_c_']) \\\n .unstack()\n ag65up_p.columns = ['t10_ldb_ag60up_p_' + x for x in ag65up_p.columns]\n\n # Add our new measure\n data_X = pd.concat([data_X, ag25dwn_p, ag65up_p], axis=1)\n\n # Get rid of all count variables, including nets\n no_count_cols = [x for x in data_X.columns if '_c_' not in x]\n data_X = data_X[no_count_cols]\n\n\n drop_cols = ['t10_gis_area_l_2010',\n 'm10_cen_uid_u_2010',\n 'm10_cen_memi_x_2010',\n 'c10_cen_uid_u_2010',\n 'z10_cen_uid_u_2010']\n\n data_X = data_X.drop(columns=drop_cols) \\\n .reindex(le_birth.index)\n\n data_X.columns = pd.Index([(x[:-5], int(x[-4:])) for x in data_X.columns])\n\n X_priority = data_X.groupby(axis=1, level=0).mean()\n X_priority.to_csv(interim_filepath / 'X_priority.csv')\n\n ###########################################################\n #################### NETS Dataset #########################\n ###########################################################\n\n X_nets_allyrs = pd.read_csv(raw_filepath / 'recvd_t10_vars_v8_20190607.csv', usecols=net_cols,\n dtype={'t10_cen_uid_u_2010': \"object\"}) \\\n .set_index('t10_cen_uid_u_2010') \\\n .reindex(le_birth.index)\n\n X_nets_allyrs.columns = pd.Index([(x[:-5], int(x[-4:])) for x in X_nets_allyrs.columns])\n X_nets = X_nets_allyrs.groupby(axis=1, level=0).mean()\n X_nets.to_csv(interim_filepath / 'X_nets.csv')\n\n # Split predictive data by Variable Set\n X_all = pd.concat([X_priority, X_nets], axis=1) \\\n .dropna(how='any')\n\n final_index = le_birth.index.intersection(X_all.index)\n X_all = X_all.reindex(final_index)\n le_birth = le_birth.reindex(final_index)\n le_other = le_other.reindex(final_index)\n\n le_birth.to_csv(processed_filepath / 'y_00.csv', header=True)\n le_other['e(20)'].to_csv(processed_filepath / 'y_20.csv', header=True)\n le_other['e(40)'].to_csv(processed_filepath / 'y_40.csv', header=True)\n le_other['e(60)'].to_csv(processed_filepath / 'y_60.csv', header=True)\n\n # Var Set 1\n p1_features = ['t10_ldb_hinci_m',\n 't10_ldb_pop_d',\n 't10_ldb_nhblk_p',\n 't10_ldb_hisp_p',\n 't10_ldb_col_p']\n X_p1 = X_all[p1_features]\n X_p1.to_csv(processed_filepath / 'X_varGroup1.csv')\n\n # Var Set 2\n p2_features = [\n \"t10_ldb_hinci_m\",\n \"t10_ldb_pop_d\",\n \"t10_ldb_ag25dwn_p\",\n \"t10_ldb_ag60up_p\",\n \"t10_ldb_nhblk_p\",\n \"t10_ldb_hisp_p\",\n \"t10_ldb_col_p\",\n \"t10_ldb_lep_p\",\n \"t10_ldb_mrenti_m\",\n \"t10_ldb_multi_p\",\n \"t10_ldb_nhwht_p\",\n \"t10_ldb_asian_p\",\n \"t10_ldb_fb_p\",\n \"t10_ldb_hs_p\",\n \"t10_ldb_unemp_p\",\n \"t10_ldb_npov_p\",\n \"t10_ldb_vac_p\",\n \"t10_ldb_own_p\",\n \"t10_ldb_mhmvali_m\"\n ]\n X_p2 = X_all[p2_features]\n X_p2.to_csv(processed_filepath / 'X_varGroup2.csv')\n\n # Var Set 3\n X_p3 = X_nets.reindex(final_index)\n X_p3.to_csv(processed_filepath / 'X_varGroup3.csv')\n\n # Var Set 4\n X_p4 = X_all\n X_p4.to_csv(processed_filepath / 'X_varGroup4.csv')", "def main():\n # AVAILABLE for 
implementation:\n # 'go_terms', 'member_databases', 'integrated', 'entry_annotations', ''\n #\n # USED:\n # basics: 'accession', 'type', 'description', 'counters', 'entry_id', 'source_database', 'name'\n # hierarchy\n # wikipedia\n # literature\n # cross_references\n # overlaps_with\n\n parser = argparse.ArgumentParser(description=\"Retrieve InterPro documents and convert them into json\")\n parser.add_argument(\"--basics\", \"-b\", action=\"store_true\", help=\"Include basic information such as accession, \"\n \"type, name, description, counters, entry_id and \"\n \"source_database\")\n parser.add_argument(\"--hierarchy\", \"-hi\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--wikipedia\", \"-w\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--literature\", \"-l\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--cross_references\", \"-cr\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--overlaps\", \"-o\", action=\"store_true\", help=\"\")\n parser.add_argument(\"dbxrefs\", nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n # if nothing specified, output all available information for the entry\n if None not in (args.basics, args.hierarchy, args.wikipedia, args.literature, args.cross_references, args.overlaps):\n args.basics = True\n args.hierarchy = True\n args.wikipedia = True\n args.literature = True\n args.cross_references = True\n args.overlaps = True\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n\n documents = retrieve(dbxrefs, basics=args.basics, hierarchy=args.hierarchy, wikipedia=args.wikipedia,\n literature=args.literature, cross_references=args.cross_references, overlaps=args.overlaps)\n print(json.dumps(documents, sort_keys=True, indent=4))", "def main():\n usage = \"usage: %prog [options] results\"\n parser = OptionParser(usage=usage)\n\n (options, args) = parser.parse_args()\n\n if len(args) != 1:\n parser.print_help()\n return 2\n\n # do stuff\n print('md5\\tgameworked')\n with open(args[0]) as results:\n results.readline() # Ignore header\n for line in results:\n fields = line.split('\\t')\n fields[0] = fields[0].replace('.exe', '')\n countfields = filter(iscount, fields)\n if gameworked(countfields):\n print('%s\\tYES' % fields[0])\n else:\n print('%s\\tNO' % fields[0])", "def main():\n if len(sys.argv) == 4:\n\n messages_path, categories_path, database_path = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_path, categories_path))\n df = load_data(messages_path, categories_path)\n\n print('Cleaning data...')\n df = clean_data(df)\n\n print('Saving data...\\n DATABASE: {}'.format(database_path))\n save_data(df, database_path)\n\n print('Cleaned data saved to database!')\n\n else:\n print('Please provide the filepaths of the messages and categories '\n 'datasets as the first and second argument respectively, as '\n 'well as the filepath of the database to save the cleaned data '\n 'to as the third argument. 
\\n\\nExample: python process_data.py '\n 'disaster_messages.csv disaster_categories.csv '\n 'DisasterResponse.db')", "def main():\n if len(sys.argv) >= 2:\n filename = sys.argv[1]\n else:\n print 'usage: ./Osmos.py file'\n sys.exit(1)\n with open(filename, 'rU') as file_handle:\n casenum = int(file_handle.readline())\n for case in range(1, casenum + 1):\n print handle_case(case, [file_handle.readline() for x in range(2)])", "def main_rnx_obsstat(argv):\n\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n global dStat\n dStat = {}\n dStat['cli'] = {}\n dStat['time'] = {}\n dStat['ltx'] = {}\n dStat['plots'] = {}\n dStat['info'] = {}\n\n dStat['cli']['obsstatf'], dStat['cli']['freqs'], dStat['cli']['mask'], dStat['cli']['cvsdb'], show_plot, logLevels = treatCmdOpts(argv)\n\n # detect used GNSS from the obsstatf filename\n dStat['info']['gnss'] = os.path.splitext(os.path.basename(dStat['cli']['obsstatf']))[0][-1]\n dStat['info']['gnss_name'] = gfzc.dict_GNSSs[dStat['info']['gnss']]\n # print('{} => {}'.format(dStat['gnss'], dStat['gnss_name']))\n\n # create logging for better debugging\n logger, log_name = amc.createLoggers(baseName=os.path.basename(__file__), logLevels=logLevels)\n\n # read the observation header info from the Pickle file\n dStat['obshdr'] = '{obsf:s}.obshdr'.format(obsf=os.path.splitext(dStat['cli']['obsstatf'])[0][:-2])\n try:\n with open(dStat['obshdr'], 'rb') as handle:\n dStat['hdr'] = pickle.load(handle)\n dStat['marker'] = dStat['hdr']['file']['site']\n # get interval, start and end times of observation\n dStat['time']['interval'] = float(dStat['hdr']['file']['interval'])\n dStat['time']['first'] = datetime.strptime(dStat['hdr']['data']['epoch']['first'].split('.')[0], '%Y %m %d %H %M %S')\n dStat['time']['last'] = datetime.strptime(dStat['hdr']['data']['epoch']['last'].split('.')[0], '%Y %m %d %H %M %S')\n # get frequencies in the observation file\n dStat['info']['freqs'] = dStat['hdr']['file']['sysfrq'][dStat['info']['gnss']]\n except IOError as e:\n logger.error('{func:s}: error {err!s} reading header file {hdrf:s}'.format(hdrf=colored(dStat['obshdr'], 'red'), err=e, func=cFuncName))\n sys.exit(amc.E_FILE_NOT_EXIST)\n\n # verify input\n check_arguments(logger=logger)\n\n # sys.file_exists(9)\n\n logger.info('{func:s}: Imported header information from {hdrf:s}\\n{json!s}'.format(func=cFuncName, json=json.dumps(dStat['hdr'], sort_keys=False, indent=4, default=amutils.json_convertor), hdrf=colored(dStat['obshdr'], 'blue')))\n\n # dStat['time']['first'] = datetime.strptime(dStat['hdr']['data']['epoch']['first'].split('.')[0], '%Y %m %d %H %M %S')\n # dStat['time']['last'] = datetime.strptime(dStat['hdr']['data']['epoch']['last'].split('.')[0], '%Y %m %d %H %M %S')\n # print(dStat['time']['first'])\n # print(type(dStat['time']['first']))\n\n # read obsstat into a dataframe and select the SNR for the selected frequencies\n dfObsStat = read_obsstat(logger=logger)\n amutils.logHeadTailDataFrame(df=dfObsStat, dfName='dfObsStat', callerName=cFuncName, logger=logger)\n\n # get the observation time spans based on TLE values\n dfTLE, dfTLEVis = tle_visibility.PRNs_visibility(prn_lst=dfObsStat.PRN.unique(),\n DTG_start=dStat['time']['first'],\n DTG_end=dStat['time']['last'],\n interval=dStat['time']['interval'],\n cutoff=dStat['cli']['mask'],\n logger=logger)\n amutils.logHeadTailDataFrame(df=dfTLE, dfName='dfTLE', callerName=cFuncName, logger=logger)\n amutils.logHeadTailDataFrame(df=dfTLEVis, 
dfName='dfTLEVis', callerName=cFuncName, logger=logger)\n\n # combine the observation count and TLE count per PRN\n dfTLEtmp = pd.DataFrame(columns=['PRN', 'TLE_count']) # , dtype={'PRN':'object','TLE_count':'int'})\n dfTLEtmp.PRN = dfTLEVis.index # convert the TLE index (which are the PRNs) to a column\n amutils.logHeadTailDataFrame(df=dfTLEtmp, dfName='dfTLEtmp', callerName=cFuncName, logger=logger)\n\n # add colmun which contains the total number of observations over all arcs\n for i, (prn, tle_prn) in enumerate(dfTLEVis.iterrows()):\n dfTLEtmp.iloc[i].TLE_count = sum(tle_prn.tle_arc_count)\n\n print('dfTLEtmp = {}'.format(dfTLEtmp))\n # combine TLE and actual observations (only SNR column used since values for all other obst are the same)\n dfObsTLE = pd.merge(dfObsStat, dfTLEtmp, on='PRN')\n amutils.logHeadTailDataFrame(df=dfObsTLE,\n dfName='dfObsTLE',\n callerName=cFuncName,\n logger=logger)\n # store the observation / TLE info in CVS file\n obsstat_name = '{basen:s}.obstle'.format(basen=os.path.basename(dStat['obsstatf']).split('.')[0])\n dfObsTLE.to_csv(obsstat_name, index=False)\n\n # store the information in cvsdb\n cvsdb_ops.cvsdb_open(cvsdb_name=dStat['cli']['cvsdb'], logger=logger)\n cvsdb_update_obstle(obsstatf=dStat['obsstatf'], dfObsTle=dfObsTLE, dTime=dStat['time'], cvsdb=dStat['cli']['cvsdb'], logger=logger)\n cvsdb_ops.cvsdb_sort(cvsdb_name=dStat['cli']['cvsdb'], logger=logger)\n\n # plot the Observation and TLE observation count\n dStat['plots']['obs_count'] = tleobs_plot.obstle_plot_obscount(marker=dStat['marker'],\n obsf=dStat['obsstatf'],\n dfObsTle=dfObsTLE,\n dTime=dStat['time'],\n show_plot=show_plot,\n logger=logger)\n dStat['plots']['relative'] = tleobs_plot.obstle_plot_relative(marker=dStat['marker'],\n obsf=dStat['obsstatf'],\n dfObsTle=dfObsTLE,\n dTime=dStat['time'],\n show_plot=show_plot,\n logger=logger)\n\n # create a section for latex reporting\n sec_obsstat = ltx_rnxobs_reporting.ltx_obsstat_analyse(dInfo=dStat['info'],\n obsstatf=dStat['obsstatf'],\n dfObsTle=dfObsTLE,\n plots=dStat['plots'],\n script_name=os.path.basename(__file__))\n dStat['ltx']['obsstat'] = os.path.join(dStat['ltx']['path'],\n '{marker:s}_02_{gnss:s}_obs_stat'.format(marker=dStat['obsstatf'][:9],\n gnss=dStat['info']['gnss']))\n sec_obsstat.generate_tex(dStat['ltx']['obsstat'])\n\n # store the observation info from TLE in CVS file\n tle_name = '{basen:s}.tle'.format(basen=os.path.basename(dStat['obsstatf']).split('.')[0])\n tle_cvs(dfTleVis=dfTLEVis, cvs_name=tle_name, logger=logger)\n # dfTLE.to_csv(tle_name, index=True, date_format='%H:%M:%S')\n\n # dGFZ['ltx']['script'] = os.path.join(dGFZ['ltx']['path'], 'script_info')\n logger.info('{func:s}: Project information =\\n{json!s}'.format(func=cFuncName, json=json.dumps(dStat, sort_keys=False, indent=4, default=amutils.json_convertor)))\n\n # report to the user\n\n # store the json structure\n jsonName = os.path.join(dStat['dir'], '{scrname:s}.json'.format(scrname=os.path.splitext(os.path.basename(__file__))[0]))\n with open(jsonName, 'w+') as f:\n json.dump(dStat, f, ensure_ascii=False, indent=4, default=amutils.json_convertor)\n\n # clean up\n copyfile(log_name, os.path.join(dStat['dir'], '{scrname:s}.log'.format(scrname=os.path.basename(__file__).replace('.', '_'))))\n os.remove(log_name)", "def main():\n # clear the console screen\n os.system('clear')\n\n # get the names of the players\n player_1 = raw_input('What is the name of player 1? ')\n player_2 = raw_input('What is the name of player 2? 
')\n\n # ask for the board size\n try:\n board_size = raw_input('How many rows and columns would you like to play with (3)? ')\n if board_size.strip() == '':\n board_size = 3\n else:\n board_size = int(board_size)\n except Exception as e:\n print \"I don't recognize your board size. Try again.\"\n sys.exit()\n\n # create the board (initialize with '-' instead of X and 0)\n board = create_board(board_size)\n\n # do tic-tac-toe until a winner is found\n outcome = tic_tac_toe(board, player_1, player_2)\n\n # print the outcome\n os.system('clear')\n print_board(board)\n print \"\\n%s wins!\" % (player_1 if outcome == 1 else player_2)\n\n\n # The code below writes the outcome to a file and then determines each \n # player's record. All you need to do is ensure that outcome is a boolean \n # value with True representing a win for player 1 and ensure that player_1 \n # and player_2 are both set.\n\n\n # the name of our game results file\n results_file = 'game_results.txt'\n\n write_result(results_file, outcome, player_1, player_2)\n\n print_records(results_file, player_1, player_2)\n\n\n # wait for the user to press enter to quit\n raw_input('\\nPress enter to quit...')\n\n # clear the console screen\n os.system('clear')", "def main() -> None:\n\n airports = {}\n some_info = {'item1': 1,\n 'item2': 2,\n }\n\n # adding items\n airports['YYZ'] = \"Toronto Pearson\"\n airports['YOW'] = \"Ottawa Canada\"\n airports['DUB'] = \"Dublin Ireland\"\n airports['LHR'] = \"London Heathrow\"\n\n # input & process\n print(\"All the airports:\")\n for key, value in airports.items():\n print(f\"The airport code is {key} for {value}.\")\n print(\"\")\n\n airport_name = input(\"Type in an airport code: \")\n if airport_name in airports:\n print(f\"The name of the airport you chose is {airports[airport_name]}.\")\n else:\n print(\"That airport is not in the airport's dictionary.\")\n\n print(\"\\nDone.\")", "def main():\n \n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')", "def main() -> None:\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print(\n \"Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}\".format(\n messages_filepath, categories_filepath\n )\n )\n df = load_data(messages_filepath, categories_filepath)\n\n print(\"Cleaning data...\")\n df = clean_data(df)\n\n print(\"Saving data...\\n DATABASE: {}\".format(database_filepath))\n save_data(df, database_filepath)\n\n print(\"Cleaned data saved to database!\")\n\n else:\n print(\n \"Please provide the filepaths of the messages and categories \"\n \"datasets as the first and second argument respectively, as \"\n \"well as the filepath of the database to save the cleaned data \"\n \"to as the third argument. 
\\n\\nExample: python process_data.py \"\n \"disaster_messages.csv disaster_categories.csv \"\n \"DisasterResponse.db\"\n )", "def main():\n logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',\n level=logging.INFO)\n\n parser = argparse.ArgumentParser(description='Monitors observation reception.')\n parser.add_argument('--config', type=str, help='configuration file to use '\n '(default: monitor.cfg)')\n\n args = parser.parse_args()\n config_file = args.config if args.config else 'monitor.cfg'\n\n if not exists(config_file):\n logging.error('Could not find configuration file \"%s\"', args.config_file)\n sys.exit(1)\n\n config = configparser.ConfigParser()\n config.read(config_file)\n\n state_file_dir = None\n if 'STATE_FILE_DIRECTORY' in environ:\n state_file_dir = environ['STATE_FILE_DIRECTORY']\n if not exists(state_file_dir) or not isdir(state_file_dir):\n logging.error('State file directory \"%s\" does not exist', state_file_dir)\n sys.exit(1)\n\n state_file_name = 'monitor_state.json' if not state_file_dir \\\n else f'{state_file_dir}/monitor_state.json'\n\n try:\n with open(state_file_name, 'r', encoding='utf-8') as state_file:\n state = json.load(state_file)\n except FileNotFoundError:\n state = {'observation': {'email_sent': 'False'},\n 'blebeacon': {'email_sent': 'False'},\n 'ruuvitag': {}}\n for location in config['ruuvitag']['Location'].split(','):\n state['ruuvitag'][location] = {}\n state['ruuvitag'][location]['email_sent'] = 'False'\n\n if config['observation']['Enabled'] == 'True':\n obs = ObservationMonitor(config, state['observation'])\n obs.check_observation()\n state['observation'] = obs.get_state()\n if config['blebeacon']['Enabled'] == 'True':\n beacon = BeaconMonitor(config, state['blebeacon'])\n beacon.check_beacon()\n state['blebeacon'] = beacon.get_state()\n if config['ruuvitag']['Enabled'] == 'True':\n ruuvitag = RuuvitagMonitor(config, state['ruuvitag'])\n ruuvitag.check_ruuvitag()\n state['ruuvitag'] = ruuvitag.get_state()\n\n with open(state_file_name, 'w', encoding='utf-8') as state_file:\n json.dump(state, state_file, indent=4)", "def load_data(filename):\n # create an evidence and label list\n evidence = []\n label = []\n\n # create a dictionary to hold key months matching to their respective values\n month = {'Jan': 0, 'Feb': 1, 'Mar': 2, 'Apr': 3, 'May': 4, 'June': 5, 'Jul': 6, 'Aug': 7, 'Sep': 8, 'Oct': 9,\n 'Nov': 10, 'Dec': 11}\n\n # open and read the csv file\n with open(filename) as data:\n # use the dictionary csv reader to be able to call the cell values by the csv column header names\n reader = csv.DictReader(data)\n # read each row in the csv and append the evidence and labels to their respective lists\n for row in reader:\n evidence.append([\n int(row[\"Administrative\"]),\n float(row[\"Administrative_Duration\"]),\n int(row[\"Informational\"]),\n float(row[\"Informational_Duration\"]),\n int(row[\"ProductRelated\"]),\n float(row[\"ProductRelated_Duration\"]),\n float(row[\"BounceRates\"]),\n float(row[\"ExitRates\"]),\n float(row[\"PageValues\"]),\n float(row[\"SpecialDay\"]),\n month[row[\"Month\"]],\n int(row[\"OperatingSystems\"]),\n int(row[\"Browser\"]),\n int(row[\"Region\"]),\n int(row[\"TrafficType\"]),\n 1 if row[\"VisitorType\"] == \"Returning_Visitor\" else 0,\n 1 if row[\"Weekend\"] == \"TRUE\" else 0,\n ])\n label.append(\n 1 if row['Revenue'] == 'TRUE' else 0\n )\n\n return evidence, label", "def main_flow():\n user_click, user_click_time = reader.get_user_click(\"../data/rating.txt\") # dict: key:user_id 
value:[item0,item1...]\n item_info = reader.get_item_info(\"../data/movies.txt\")\n\n sim_info = cal_sim_info(user_click, user_click_time)\n debug_itemsim(item_info, sim_info)\n recom_result = cal_recom_result(sim_info, user_click)\n print(recom_result[\"1\"])\n debug_recomresult(recom_result, item_info)", "def main(filename, deployment_id):\n if not filename:\n print(\n 'Input file is required argument. '\n 'Usage: python datarobot-predict.py <input-file.csv>')\n return 1\n data = open(filename, 'rb').read()\n data_size = sys.getsizeof(data)\n if data_size >= MAX_PREDICTION_FILE_SIZE_BYTES:\n print(\n 'Input file is too large: {} bytes. '\n 'Max allowed size is: {} bytes.'\n ).format(data_size, MAX_PREDICTION_FILE_SIZE_BYTES)\n return 1\n try:\n predictions = make_datarobot_deployment_predictions(data, deployment_id)\n except DataRobotPredictionError as exc:\n print(exc)\n return 1\n print(predictions)\n return 0", "def Main():\n argument_parser = argparse.ArgumentParser(description=(\n 'Extracts information from USN change journal records.'))\n\n argument_parser.add_argument(\n '-d', '--debug', dest='debug', action='store_true', default=False,\n help='enable debug output.')\n\n argument_parser.add_argument(\n 'source', nargs='?', action='store', metavar='PATH',\n default=None, help='path of the USN change journal records.')\n\n options = argument_parser.parse_args()\n\n if not options.source:\n print('Source file missing.')\n print('')\n argument_parser.print_help()\n print('')\n return False\n\n logging.basicConfig(\n level=logging.INFO, format='[%(levelname)s] %(message)s')\n\n output_writer = output_writers.StdoutWriter()\n\n try:\n output_writer.Open()\n except IOError as exception:\n print(f'Unable to open output writer with error: {exception!s}')\n print('')\n return False\n\n usn_records = usn_journal.USNRecords(\n debug=options.debug, output_writer=output_writer)\n usn_records.Open(options.source)\n\n output_writer.WriteText('USN journal records information:')\n output_writer.WriteText(','.join([\n 'Date and time', 'Name', 'File reference', 'Parent file reference']))\n for usn_record in usn_records.ReadRecords():\n # pylint: disable=protected-access\n date_time = usn_record._FormatIntegerAsFiletime(usn_record.timestamp)\n\n mft_entry = usn_record.file_reference & ((1 << 48) - 1)\n sequence_number = usn_record.file_reference >> 48\n file_reference = f'{mft_entry:d}-{sequence_number:d}'\n\n mft_entry = usn_record.parent_file_reference & ((1 << 48) - 1)\n sequence_number = usn_record.parent_file_reference >> 48\n parent_file_reference = f'{mft_entry:d}-{sequence_number:d}'\n\n output_writer.WriteText(','.join([\n date_time, usn_record.name, file_reference, parent_file_reference]))\n\n usn_records.Close()\n\n output_writer.Close()\n\n return True", "def main():\n\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('messages_filepath', help='The location of the csv file containing your messages.')\n parser.add_argument('categories_filepath', help='The location of the csv file containing your message categories.')\n parser.add_argument('database_filepath', help='The location to which you wish to save the cleaned messages database file.')\n \n # parse args or raise expection on failure\n try:\n args = parser.parse_args()\n except:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the 
third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n raise\n\n # assign command line args to vars\n messages_filepath = args.messages_filepath\n categories_filepath = args.categories_filepath\n database_filepath = args.database_filepath\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')", "def main():\r\n\r\n #open the file\r\n with open('csvfile1.csv', 'r') as csvfile1:\r\n #read the file\r\n csv_reader = csv.reader(csvfile1)\r\n #jummp the first line\r\n next(csv_reader)\r\n #loop through the file\r\n for line in csv_reader:\r\n print(line)", "def main():\n if len(sys.argv) == 2 and sys.argv[1] == 'train':\n trainer = FlightModelTrainer()\n trainer.train()\n return 0\n\n if len(sys.argv) == 2 and sys.argv[1] == 'graphics':\n trainer = FlightModelTrainer()\n trainer.visualize()\n return 0\n\n predictor = FlightPredictor(path_to_weather=WEATHER_TRAIN_DATA_PATH)\n result = predictor.predict(pd.read_csv(FLIGHTS_TEST_DATA_PATH))\n print('result')\n print(result)\n # result.to_csv(\"out.csv\")\n return 0", "def important(ind):\n option = [WAR, ROBBERY, TREASURE_HUNT, FERTILIZER_POTION, MARRIAGE]\n no_option = [BIRTHDAY, SECRET_ROOM_WITH_GOLD, FLOOD, ROBBERY_OF_TREASURY, PLAGUE, WATER_SHREDDING, NOMAD_RAID,\n LONGEVITY_POTION]\n\n indicators = ind\n\n print(YEAR, indicators['year'], '\\n', GROUND, indicators['ground'], '\\n', MONEY, indicators['money'], '\\n',\n GRAIN, indicators['grain'], '\\n', PEOPLE, indicators['people'], '\\n', DISTEMPER, indicators['distemper'],\n sep='')\n\n indicators['money'], indicators['grain'], indicators['people'] = main_questions(indicators['money'],\n indicators['grain'],\n indicators['people'])\n indicators['grain'], indicators['ground'], phrase = plants(indicators['ground'], indicators['grain'])\n\n indicators['year'] += 1\n\n if indicators['grain'] / indicators['people'] < 50 or indicators['ground'] / indicators['people'] < 1:\n indicators['distemper'] += 10\n elif indicators['distemper'] > 5:\n if indicators['grain'] / indicators['people'] > 80 or indicators['ground'] / indicators['people'] > 2.5:\n indicators['distemper'] -= 10\n\n print(phrase)\n indicators = situation(random.randint(0, 1), indicators, option, no_option)\n\n ans = input(GAME_CONTINUE)\n if ans.lower() == YES:\n if indicators['distemper'] == 100 or indicators['ground'] <= 0 or indicators['money'] <= 0 or \\\n indicators['grain'] <= 0 or indicators['people'] <= 0:\n print(THE_END)\n print(TOTAL_RESULTS, '\\n', YEAR, indicators['year'], '\\n', GROUND, indicators['ground'], '\\n', MONEY,\n indicators['money'], '\\n', GRAIN, indicators['grain'], '\\n', PEOPLE, indicators['people'], '\\n',\n DISTEMPER, indicators['distemper'], sep='')\n else:\n important(ind)\n else:\n print(GAME_EXIT)\n print(YEAR, indicators['year'], '\\n', GROUND, indicators['ground'], '\\n', MONEY, indicators['money'],\n '\\n', GRAIN, indicators['grain'], '\\n', PEOPLE, indicators['people'], '\\n', DISTEMPER,\n indicators['distemper'], sep='')" ]
[ "0.6056687", "0.57561624", "0.5681713", "0.5558769", "0.5533532", "0.55103606", "0.54816645", "0.5471882", "0.5470575", "0.5453268", "0.54475284", "0.5446846", "0.5314073", "0.53068393", "0.5291783", "0.52585655", "0.52576137", "0.5234658", "0.5230194", "0.5194857", "0.51882", "0.5187348", "0.51819515", "0.5159562", "0.5134737", "0.5109166", "0.51057583", "0.51003957", "0.50865924", "0.50835425", "0.5066524", "0.50169134", "0.50125015", "0.50045216", "0.4995928", "0.49785677", "0.49637777", "0.49584496", "0.49537396", "0.49184638", "0.49157417", "0.49144012", "0.49099606", "0.49052218", "0.49028996", "0.49027938", "0.49016103", "0.48969394", "0.4895852", "0.48952374", "0.48930588", "0.48923907", "0.4874727", "0.48743412", "0.48673138", "0.48585337", "0.48356786", "0.4833596", "0.48286757", "0.48173317", "0.48133242", "0.4812549", "0.48061788", "0.48038265", "0.4802791", "0.4802179", "0.4794779", "0.4785377", "0.47835335", "0.47830212", "0.47795558", "0.4777301", "0.47686556", "0.47672495", "0.47622624", "0.47594288", "0.47587153", "0.47527733", "0.47485477", "0.47390667", "0.47366822", "0.47221845", "0.47166362", "0.47143677", "0.47102425", "0.47087002", "0.47053483", "0.47023693", "0.46978474", "0.46964088", "0.46960142", "0.46941918", "0.46916336", "0.46896955", "0.4689284", "0.4689051", "0.4687677", "0.46875563", "0.4687085", "0.46870044" ]
0.6281538
0
Keep only not-NaN column positions in all arrays.
def drop_nan_columns(arrays):

    # Keep all column indices
    not_nan_filter = ones(len(arrays[0]), dtype=bool)  # Currently keeping all columns!

    # Keep column indices without missing value in all arrays
    # for a in arrays:
    #     not_nan_filter &= ~isnan(a)

    return [a[not_nan_filter] for a in arrays]
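The stored snippet above leaves the NaN filter commented out, so as written it keeps every column position. For reference only (this block is an editorial illustration, not part of the dataset row), a minimal sketch of what the enabled filter looks like, assuming the inputs are equal-length 1-D numeric arrays and that numpy is available; the name drop_nan_columns_strict is hypothetical.

import numpy as np

def drop_nan_columns_strict(arrays):
    """Keep only the positions that are not NaN in any of the input arrays."""
    # Start by marking every column position as keep.
    not_nan_filter = np.ones(len(arrays[0]), dtype=bool)

    # Drop any position that is NaN in at least one array.
    for a in arrays:
        not_nan_filter &= ~np.isnan(np.asarray(a, dtype=float))

    return [np.asarray(a)[not_nan_filter] for a in arrays]

# Example: position 0 is the only one without a NaN in either array.
a = np.array([1.0, np.nan, 3.0])
b = np.array([4.0, 5.0, np.nan])
print(drop_nan_columns_strict([a, b]))  # [array([1.]), array([4.])]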
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
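The objective block above marks this row for triplet-style use over (query, document, negatives), with the scored negatives listed next. As a hedged editorial sketch (not part of the dump), one way such a row could be unpacked into (query, positive, negative) training triples once it has been parsed into a Python dict; the field names mirror the row layout here, while the helper name, the cap on negatives, and the optional score threshold are assumptions.

def row_to_triples(row, max_negatives=5, min_score=None):
    # `row` is assumed to be a dict with the fields seen in this dump:
    # "query", "document", "negatives", and optionally "negative_scores".
    query = row["query"]
    positive = row["document"]
    negatives = row.get("negatives", [])
    scores = row.get("negative_scores", [None] * len(negatives))

    triples = []
    for negative, score in zip(negatives, scores):
        # Skip weak negatives when scores are available and a threshold is set.
        if min_score is not None and score is not None and float(score) < min_score:
            continue
        triples.append((query, positive, negative))
        if len(triples) >= max_negatives:
            break
    return triples

Each triple can then feed whatever triplet or contrastive loss the downstream retrieval trainer expects.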
[ "def columns_with_na_values(data):\n aux = data.isna().sum() > 0\n return aux.index[aux.values].values", "def remove_nans(coords):\n s = np.apply_along_axis(sum,1,np.isnan(coords[1])) == 0\n coords[0] = (np.asarray(coords[0])[s]).tolist()\n coords[1] = coords[1][s,:]", "def remove_nans(arr):\n not_nan = [i for i in range(len(arr)) if not np.isnan(arr[i])]\n\n return not_nan, arr[not_nan]", "def removeNans(data):\n for i in data[:]:\n ind = data.index(i)\n for j in i:\n if np.isnan(j):\n data.remove(i)\n break\n return data", "def remove_empty_columns(x, threshold=0.4):\n # For each column compute the ratio of nan values over the number of rows\n prop_empty_column = (np.isnan(x)).sum(axis=0) / len(x)\n column_mask = prop_empty_column < threshold\n return x[:, column_mask], column_mask", "def remove_nans(arr, nan_rows=None):\n # Remove NaNs\n nconfigs, nt = arr.shape\n if nan_rows is None:\n mask = np.isfinite(arr)\n else:\n mask = np.array([n for n in np.arange(nconfigs) if n not in nan_rows])\n return arr[mask].reshape(-1, nt)", "def locate_nan_rows(arr):\n # Count the number of NaNs in each row\n nan_counts = np.sum(~np.isfinite(arr), axis=1)\n # Trigger on a NaN appearing anywhere in a line/row\n nans, = np.where(nan_counts > 1)\n return frozenset(nans)", "def filter_nan(s,o):\n data = np.array([s.flatten(),o.flatten()])\n data = np.transpose(data)\n data = data[~np.isnan(data).any(1)]\n #data = data[~np.isnan(data)]\n return data[:,0],data[:,1]", "def nonans(array):\n return array[~np.isnan(array)]", "def purgeNanEveryWhere(df):\n #Row-wise dropping\n toDrop = np.array([])\n for i in range(df.shape[0]):\n if( np.sum ( pd.isnull(df.iloc[i]) ) == df.shape[1]-1 ):\n toDrop= np.append(toDrop,i)\n df.drop(df.index[toDrop.astype(int)],inplace=True) \n #Column-wise dropping\n for col in df.columns:\n arr = pd.notnull(df[col])\n nnan = np.sum(arr) \n if (nnan == df.shape[1]):\n df.drop(col,inplace=True,axis=1)\n return df", "def _non_zero_columns_search(array):\n col_num = array.shape[1]\n non_zero_col = CArray([], dtype=int)\n for c in range(col_num):\n col = array[:, c]\n if col.any() == True:\n non_zero_col = non_zero_col.append(c)\n\n return non_zero_col", "def get_columns_with_all_nulls(X, columns_to_check='all', rows_to_scan='all'):\n rows_to_scan = get_rows_to_scan(rows_to_scan, X.shape[0])\n columns_to_check = get_list_of_columns_to_check(columns_to_check, X.columns)\n mask = np.array(X[columns_to_check][:rows_to_scan].count() == 0)\n return list(np.array(columns_to_check)[mask])", "def remove_null_cols(df, thresh=0.08):\n \n # look at this\n # df.dropna(thresh=int(df.shape[0] * .9), axis=1)\n pct_null = df.isnull().sum() / len(df)\n missing_features = pct_null[pct_null > thresh].index\n return df.drop(missing_features, axis=1)", "def get_columns_with_nulls(X, columns_to_scan = \"all\", rows_to_scan=100000):\n rows_to_scan = get_rows_to_scan(rows_to_scan, X.shape[0])\n \n columns_to_scan = get_list_of_columns_to_check(columns_to_scan, X.columns)\n mask = np.array(X[columns_to_scan][:rows_to_scan].count() < rows_to_scan)\n return list(np.array(columns_to_scan)[mask])", "def _nan_cells(traces):\n # Find all cells with NaNs\n nancells = []\n ncells = -1\n for cs in traces:\n if len(traces[cs]) > 0:\n ncells = np.shape(traces[cs])[1]\n ns = np.sum(np.sum(np.invert(np.isfinite(\n traces[cs])), axis=2), axis=0)\n vals = np.arange(ncells)\n nancells.extend(vals[ns > 0])\n\n # Set _mask_cells if it hasn't been set\n out = np.zeros(ncells, dtype=bool)\n\n # Convert nancells to a list of good 
cells\n nancells = np.array(list(set(nancells)))\n if len(nancells) > 0:\n print('Warning: %i cells have NaNs'%len(nancells))\n out[nancells] = True\n\n return out", "def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)", "def nancnt_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = np.sum(~np.isnan(a[:, col]))\n return out", "def handel_nans(self):\n col_nan_pct = self.df.isin([' ',np.nan]).mean() #Calculates percent of Nans\n col_names = col_nan_pct[col_nan_pct >= .1].index # Gets name of columns with over 50% Nans\n col_count = [self.df[col].count() for col in col_names for x in self.df if x == col] #Gets length of valid values for column\n dropped_col = [col for col in zip(col_count, col_names) if col[0] <= 1400] #Gets columns names with under 50 values\n [self.df.drop(columns=[col[1]], inplace=True) for col in dropped_col]\n self.dropped_cols_phase_one = dropped_col\n [self.column_dtypes.pop(item[1]) for item in dropped_col]\n self.df[self.target].dropna(inplace=True)", "def get_nan_columns(df):\n df = nan_val_summary(df)\n return df[df['fraction_missing'] > 0]['columns'].values", "def remove_none_from_arrays(self):\r\n\r\n is_nan = numpy.isnan(self.y_values) # array of booleans, element is True if the corresponding element in\r\n # self.y_values is None\r\n\r\n self.x_values = self.x_values[numpy.logical_not(is_nan)]\r\n self.y_values = self.y_values[numpy.logical_not(is_nan)] # replace all None elements\r", "def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df", "def dropna(df, axis=0, th=0.4):\n df = df.copy()\n axis = 0 if axis==1 else 1\n col_idx = df.isna().sum(axis=axis)/df.shape[axis] <= th\n df = df.iloc[:, col_idx.values]\n return df", "def whichColumnsNA(df):\n return df.columns[df.isna().any()].tolist()", "def filter_nans(seq):\n return np.array([x for x in seq if not isinstance(x, float)])", "def bad_column_positions(self, x):\n return x.is_null()", "def dropNaN(featZ):\n \n n_cols = len(featZ.columns)\n featZ.dropna(axis=1, inplace=True)\n n_dropped = n_cols - len(featZ.columns)\n \n if n_dropped > 0:\n print(\"Dropped %d features after normalisation (NaN)\" % n_dropped)\n \n return featZ", "def clean_data(x, null, drop_thresh):\n # Do not modify the original dataset\n x_clean = np.copy(x)\n \n # Vector holding, for each feature, the fraction of datapoints with a null value\n null_frac = (1/x_clean.shape[0]) * np.sum(x_clean==null, axis=0)\n # Boolean vector holding, for each feature, whether or not it needs to be kept\n column_to_keep = null_frac <= drop_thresh\n \n # Drop bad columns\n x_clean = x_clean[:, column_to_keep]\n \n # Vector of (list of) indices of columns where there are still null values\n columns_to_interpolate = np.argwhere(np.any(x_clean==null, axis=0))\n \n # For each of those columns, find the mean of non-null values, and substitute it to null values\n for col_list in columns_to_interpolate:\n # Extrapolate only entry of col_list\n col = col_list[0]\n \n # Boolean vector holding, for each row, whether or not it has a \"null\" entry at position \"col\"\n row_non_null = x_clean[:, col] != null\n # Find mean\n interpolation = np.mean(x_clean[row_non_null, col])\n # Substitute it to null values\n row_null = np.logical_not(row_non_null)\n x_clean[row_null, col] = interpolation\n \n return x_clean", "def 
isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def get_columns_not_all_nulls(X, columns_to_check='all', rows_to_scan='all'):\n columns_to_check = get_list_of_columns_to_check(columns_to_check, X.columns)\n remove_columns = get_columns_with_all_nulls(X, columns_to_check, rows_to_scan)\n return list(set(columns_to_check)-set(remove_columns))", "def filter_nb(a, filter_func_nb, *args):\n out = a.astype(np.float_)\n\n for col in range(out.shape[1]):\n idxs = np.flatnonzero(~np.isnan(a[:, col]))\n for i in idxs:\n if not filter_func_nb(i, col, a[i, col], *args):\n out[i, col] = np.nan\n return out", "def isnan(self):\n return self.isAny( (lambda x: np.isnan(x)) )", "def dropna(*args):\n # (True|False) mask for each row based on NaN values present in the first dataframe\n mask = ~pd.isnull(args[0])\n if mask.ndim == 2:\n mask = mask.all(1) # in 2d array, keep row only if all values are not NaN\n\n return [x.loc[mask] if isinstance(x, pd.DataFrame) else x[mask] for x in args]", "def remove_cols_high_missing_rates(data, min_missing_rate=0.4):\n cols_keep = list(data.isna().mean()[data.isna().mean() < min_missing_rate].index)\n return data[cols_keep], cols_keep", "def remove_nan(self, dataframe):\n return dataframe.dropna()", "def remove_nans4(a, b, c, d):\n a = np.asarray(a)\n b = np.asarray(b)\n c = np.asarray(c)\n d = np.asarray(d)\n\n mask = ~np.isnan(a) & ~np.isnan(b) & ~np.isnan(c) & ~np.isnan(d)\n a = a[mask]\n b = b[mask]\n c = c[mask]\n d = d[mask]\n\n return a, b, c, d", "def drop_nans(target_df):\n target_df.dropna(axis=0, inplace=True)\n return target_df", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def filter_nan(y, x, ids, remove=True, replace_val=0.0):\n mask = np.isnan(x)\n \n if remove:\n # Remove the rows containing any NaN\n row_mask = ~mask.any(axis=1) # Sets to False any rows containing NaN\n x_copy, y_copy, ids_copy = x[row_mask], y[row_mask], ids[row_mask]\n # Remove 0 filled columns\n col_mask = x_copy.sum(axis=0) != 0 # True if the columns is filled with 0\n x_copy = x_copy[:, col_mask] \n return y_copy, x_copy, ids_copy, col_mask\n else:\n # Replace NaN values by replace_val\n x[mask] = replace_val\n return y, x, ids", "def _find_Vgroups(self, X):\n na_value = X[self.V_features].isnull().sum()\n na_list = na_value.unique()\n na_value = na_value.to_dict()\n cols_same_null = []\n for i in range(len(na_list)):\n cols_same_null.append([k for k, v in na_value.items() if v == na_list[i]])\n return cols_same_null", "def get_cols_drop():", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def remove_nans3(a, b, c):\n a = np.asarray(a)\n b = np.asarray(b)\n c = np.asarray(c)\n\n mask = ~np.isnan(a) & ~np.isnan(b) & ~np.isnan(c)\n a = a[mask]\n b = b[mask]\n c = c[mask]\n\n return a, b, c", "def removeNonQuant(df, cols):\r\n df = df[~(df[cols].isnull().all(1))]\r\n return df", "def high_null_count(df, thresh):\n cols_remove = []\n for col in df.columns:\n if df[col].isna().sum() / df.shape[0] >= thresh:\n cols_remove.append(col)\n\n return df.drop(columns=cols_remove, axis=1)", "def removeNan(values):\n\treturn list(filter(lambda v: not 
math.isnan(v), values))", "def remove_blanks(mat):\n ids = []\n for idx, row in enumerate(mat):\n if not 1 in row:\n ids.append(idx)\n mat = np.delete(mat, ids, 0)\n mat = np.delete(mat, ids, 1)\n return mat", "def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)", "def filter_NaNpeptides(X, cut=False, tmt=False):\n d = X.select_dtypes(include=[\"float64\"])\n if cut:\n Xidx = np.count_nonzero(~np.isnan(d), axis=1) / d.shape[1] >= cut\n else:\n idx_values = FindIdxValues(X)\n dict_ = defaultdict(list)\n for i in range(idx_values.shape[0]):\n dict_[idx_values[i, 0]].append(idx_values[i, -1])\n Xidx = [len(set(dict_[i])) >= tmt for i in range(X.shape[0])]\n return X.iloc[Xidx, :]", "def _nan_data(data, to_nan=0.2):\n # Number of values to be NaNed as int\n to_nan = int(len(data) * to_nan)\n # Existing NaN's as indicies\n existing_nans = data[data.isnull() == True].index\n return to_nan, existing_nans", "def _get_nan_indices(*tensors: Tensor) ->Tensor:\n if len(tensors) == 0:\n raise ValueError('Must pass at least one tensor as argument')\n sentinel = tensors[0]\n nan_idxs = torch.zeros(len(sentinel), dtype=torch.bool, device=sentinel.device)\n for tensor in tensors:\n permuted_tensor = tensor.flatten(start_dim=1)\n nan_idxs |= torch.any(torch.isnan(permuted_tensor), dim=1)\n return nan_idxs", "def drop_one_elem_columns(self, df):\n df_ = df.copy()\n\n # Incldue columns in dataframe\n include_idx = []\n for i in df_.columns:\n len_unique = df_[i].dropna().unique().size\n if len_unique > 1:\n include_idx.append(i)\n\n df_ = df_[include_idx]\n return df_", "def dropna(self) -> \"Dataset\":\n if not self[0]._has_time_axis: # type: ignore\n raise ValueError(\"Not available if no time axis!\")\n\n all_index: List[int] = []\n for i in range(self.n_items):\n x = self[i].to_numpy()\n\n # this seems overly complicated...\n axes = tuple(range(1, x.ndim))\n idx = list(np.where(~np.isnan(x).all(axis=axes))[0])\n if i == 0:\n all_index = idx\n else:\n all_index = list(np.intersect1d(all_index, idx))\n\n return self.isel(all_index, axis=0)", "def fully_hidden_arrays(self):\n hidden = []\n for m in self.masks():\n invalid = self.get_property(m, '_no_valid_items')\n if invalid: hidden.append(m)\n return hidden", "def fix_data(self, df):\n return df.dropna(axis='columns', how='all').fillna(0.0)", "def check_missing_values(col):\n return np.sum(np.isnan(col))", "def show_nan(df):\n nan_df = df[(~df['tweet_user_location'].str.lower().isin(\n [x.lower() for x in LOCATION_DISCARD])) & df['geonameid'].isnull()]\n print(f'Number of NaNs: {len(nan_df.index)}')\n return nan_df", "def get_cols_dummy():", "def find_first_non_nan(array):\n for index, value in enumerate(array):\n if not np.isnan(value):\n return index", "def missing_values():\n print('Missings in the train data:', train_data.isnull().sum())", "def nanmin_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = np.nanmin(a[:, col])\n return out", "def clean_coordinates(coords, silent=False):\n l1 = len(coords)\n coords = coords[~N.isnan(coords).any(axis=1)] # remove NaN values\n l2 = len(coords)\n if not silent:\n msg = \"{0} coordinates\".format(l2)\n if l2 < 
l1:\n msg += \" ({0} removed as invalid)\".format(l1-l2)\n print(msg)\n return coords", "def get_undef_cols_idx(x, undef_val):\n undef_col_idx = []\n for col_idx in range(x.shape[1]):\n column = x[:, col_idx]\n if((column == undef_val).all()):\n undef_col_idx.append(col_idx)\n\n return undef_col_idx", "def filter_empty_genes(data, *extra_data):\n gene_sums = np.array(utils.matrix_sum(data, axis=0)).reshape(-1)\n keep_genes_idx = gene_sums > 0\n data = select.select_cols(data, *extra_data, idx=keep_genes_idx)\n return data", "def drop_high_nan(df, threshold=0.5):\n n_nans = df.isnull().sum()\n freq_nans = n_nans/float(len(df)) #in percentage\n to_drop = (freq_nans > threshold).values\n columns_drop = df.columns.values[to_drop].tolist()\n return df.drop(columns_drop, axis=1)", "def cnan(x):\n if np.isnan(x).sum()>0:\n import pdb\n pdb.set_trace()", "def remove_players_wo_positions(df):\n df = df[pd.notnull(df['FantPos'])]\n return df", "def test_remove_nan_observations(test_coords):\n x, y = test_coords[0], test_coords[1]\n\n z = np.array([np.nan, np.nan, np.nan, 1, 1, 1, 1, 1, 1, 1])\n\n x_, y_, z_ = remove_nan_observations(x, y, z)\n\n truthx = np.array([10, 52, 53, 98, 34, 15, 58])\n truthy = np.array([94, 98, 66, 14, 24, 60, 16])\n truthz = np.array([1, 1, 1, 1, 1, 1, 1])\n\n assert_array_almost_equal(truthx, x_)\n assert_array_almost_equal(truthy, y_)\n assert_array_almost_equal(truthz, z_)", "def remove_rows_with_null_values(df, not_null_columns=[]):\n\n if len(not_null_columns) == 0:\n not_null_columns = df.columns.to_list()\n\n total_rows_removed = 0\n for col in not_null_columns:\n rows_removed = len(df.loc[df[col].isnull()])\n #rows_removed = len(df.loc[df[col].isnull() is True])\n\n if rows_removed > 0:\n print(f\"{rows_removed} rows have been removed because of null values in column {col}\")\n total_rows_removed += rows_removed\n\n df = df.loc[df[col].isnull() is False]\n\n if total_rows_removed > 0:\n print(f\"\\nTotal rows removed because of null data: {total_rows_removed}\");\n else:\n print(\"No null data found\")\n\n return df", "def _index_to_nan_fast(data, existing_nans, to_nan):\n index_nan = []\n randgen = (np.random.choice(len(data)) for _ in cnt(start=1))\n for i in range(to_nan):\n ix = next(filter(lambda x: x not in existing_nans and x not in index_nan, randgen))\n index_nan.append(ix)\n data_imp = data.copy()\n data_imp[index_nan] = np.nan\n return data_imp, index_nan", "def _remove_nan(parsed_dictionary):\n for key, value in parsed_dictionary.items():\n if isinstance(value, np.ndarray):\n non_nan_value = np.nan_to_num(value, nan=123456789, posinf=2e308, neginf=-2e308)\n parsed_dictionary.update({key: non_nan_value})\n\n return parsed_dictionary", "def filterMissings(self, threshold, data):\n\n #replace NAs by 0 for counting\n data.fillna(0).astype(bool).sum(axis=1)\n\n filtered_columns = data.columns\n\n\n #find out threshold, i.e. 
minimum number of non-zero in real numbers\n rowNumber = data.shape[0]\n min_nonZeros = int(rowNumber - ((rowNumber * int(threshold))/100))\n\n zero_counts = data.astype(bool).sum(axis=0)\n\n for columnID, nonZeros in zero_counts.items():\n if nonZeros <= min_nonZeros:\n filtered_columns = filtered_columns.drop(columnID)\n\n\n return data[filtered_columns]", "def fill_nans(data):\n for col in data.columns:\n data[col].fillna(-999, inplace=True)", "def deletingNaNs(df):\n # start_ time.time()\n df_old = df.copy()\n df.dropna(axis=1, how='any', inplace=True)\n for key in df_old:\n if str(key) not in df:\n print('Deleted ', key)\n # end_time time.time()\n #print('Time to run deletingNaNs: ', end_time - start_time)\n return df", "def remove_columns_missing_values(df, min_threshold):\n for col in df.columns:\n rate = sum(df[col].notnull())/float(len(df)) * 100\n if rate <= min_threshold:\n df = df.drop(col,1)\n return df", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def remove_filler(dgm, val=np.inf):\r\n inds = (dgm[:,0] != val)\r\n return dgm[inds,:]", "def remove_zero_features(df,no_zeros = 1):\n thing = df.astype(bool).sum(axis=0) # number of nonzeros in each column\n idx = pd.Index(thing) #Index format\n location = idx.get_loc(no_zeros) # Set all elements that are 1.0 to True, rest to False.\n loc_of_one = np.asarray(np.nonzero(location)) #Array of columns with only one nonzero element\n loc_of_one = loc_of_one[0]\n df_new = df.drop(df.columns[loc_of_one], axis = 1) # New reduced dataframe\n return df_new", "def _fix_uniq_col(self):\n # subgradient; for two boolean arrays, multiplication seems to be the best way \n # (equivalent to logical_and)\n n_covered_col = self.a_csr.dot(np.ones(self.ncols)) \n ifix = np.zeros(self.ncols, dtype=bool)\n if (np.count_nonzero(n_covered_col) != self.mrows):\n raise ValueError(\"There are uncovered rows! 
Please check your input!\")\n if (np.any(n_covered_col==1)):\n inonzero = self.a_csr[n_covered_col==1,:].nonzero()\n ifix[inonzero[1]] = True\n\n return ifix", "def filter_zeros(X):\n\tnoNonzeros = np.count_nonzero(X, axis=1)\n\tmask = np.where(noNonzeros > 0)\n\treturn X[mask[0], :]", "def remove_nan(X, y):\n newX = []\n newY = []\n for i in range(0, len(X)):\n lst = X[i]\n lbl = y[i]\n flag = True\n for i in lst:\n if np.isnan(i):\n flag = False\n break\n if flag:\n newX.append(lst)\n newY.append(lbl)\n return np.array(newX), np.array(newY)", "def non_null_df(df, required_cols):\n return df.where(reduce(lambda x, y: x & y, (col(x).isNotNull() for x in required_cols)))", "def checkNaN(data):\n if data.isnull().values.any():\n N = data.isnull().sum().sum()\n print(\"There are {} missing values.\".format(N))", "def dataCleaner(dataframe):\r\n dataframe = dataframe.dropna(how='all')\r\n for col in dataframe:\r\n dataframe[col] = dataframe[col].apply(lambda x : np.nan() if str(x).isspace() else x)\r\n dataframe[col] = dataframe[col].fillna(dataframe[col].mean())\r\n return dataframe", "def _drop_empty_rows_and_cols(confmat: Tensor) ->Tensor:\n confmat = confmat[confmat.sum(1) != 0]\n confmat = confmat[:, confmat.sum(0) != 0]\n return confmat", "def replace_nan(data_jets):\n \n data_mean = np.empty_like(data_jets)\n data_median = np.empty_like(data_jets)\n data_null = np.empty_like(data_jets)\n \n for jet in range(4):\n # Replace Remaining undefined values by Mean, median or zero\n data_mean[jet] = np.where(np.isnan(data_jets[jet]), np.nanmean(data_jets[jet], axis=0), data_jets[jet])\n data_median[jet] = np.where(np.isnan(data_jets[jet]), np.nanmedian(data_jets[jet], axis=0), data_jets[jet])\n data_null[jet] = np.where(np.isnan(data_jets[jet]), np.float64(0), data_jets[jet])\n \n return data_mean, data_median, data_null", "def removeNull(self):\n self.data.dropna(axis=0, how='any')", "def add_cdm_missing_columns(self, all_merged_obs):\n #cdm_keys = self.obs_table_columns \n nan_array = np.empty( all_merged_obs['observed_variable'].shape )\n nan_array[:] = np.nan\n for k in self.obs_table_columns:\n if k not in list(all_merged_obs.columns ):\n logging.debug('Adding missing cdm colum with empty values: %s' , k )\n all_merged_obs[k] = ( nan_array )\n \n return all_merged_obs", "def get_missing(self):\n missing_values = self.df[self.col_name].isnull().sum()\n return missing_values", "def filter_data(df, needed_columns, not_null_columns=[]):\n\n if all(x in needed_columns for x in not_null_columns):\n\n df = get_needed_columns(df, needed_columns)\n #Use the pandas method bc the other method was erroring on boolean index.\n #IM - 9/23/22\n df = df.dropna(subset=not_null_columns)#remove_rows_with_null_values(df, not_null_columns)\n\n return df\n else:\n print(\"Developer error, not null columns should be a subset of needed columns\")\n return df", "def _drop_projected_dims(dims):\n return [d for d in dims if d != 1]", "def omit_nans(self, data, label):\n maskarray=np.full(data.shape[0], True)\n masker=np.unique(np.argwhere(np.isnan(data))[:,0])\n maskarray[masker]=False\n traindata=data[maskarray,:,:,:]\n trainlabel=label[maskarray]\n return traindata, trainlabel", "def checkNaN(data_dict):\n for k, v in data_dict.iteritems():\n mark = True\n for feature, value in v.iteritems():\n if (value != 'NaN') and (feature != 'poi'):\n mark = False\n break\n if mark:\n print k\n print v['poi']", "def delete_all_gap(self):\n # pdb.set_trace()\n\n rem = set(self.get_all_gap_cols())\n subset = [x for x in 
range(0, self.get_length()) if x not in rem]\n self.remove_columns(set(rem))\n #_LOG.debug(\"Alignment length reduced to %d\" % len(subset))\n return subset", "def analyze(tx):\n num_cols = tx.shape[1]\n print('\\nNumber of columns in the data matrix: ',num_cols)\n columns_to_remove = []\n print('Analysis of data:\\n')\n for col in range(num_cols):\n current_col = tx[:, col]\n if len(np.unique(current_col)) == 1:\n print('The column with index ', col, ' is all the same, it will be removed.')\n columns_to_remove.append(col)\n else:\n current_col[current_col == -999] = np.median(current_col[current_col != -999])\n # Handling the outliers\n std_current_col = np.std(current_col)\n mean_current_col = np.mean(current_col)\n lower_bound = mean_current_col - 2 * std_current_col\n upper_bound = mean_current_col + 2 * std_current_col\n current_col[current_col < lower_bound] = lower_bound\n current_col[current_col > upper_bound] = upper_bound\n print('Null values in the ', col, ' indexed column are replaced with the mean and outliers.')\n return columns_to_remove", "def test_dtype_None(self):\n array = np.array([[0, 1, 2], [2, 1, 0]]).T\n self.assertTrue(to_ndarray(array, None, safe=True).flags.contiguous,\n msg='to_ndarray: Non contiguous arrays are not being consolidated when dtype is None')", "def filter_nan_samples(self, train_x, train_y):\n\n n_samples = train_x.shape[0]\n if n_samples != train_y.shape[0]:\n raise ValueError(\"x and y sample lengths don't match\")\n\n validity_array = np.zeros(n_samples)\n for i in range(n_samples):\n x_sample = train_x[i, :]\n y_sample = train_y[i, :]\n validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()\n\n mask = np.where(validity_array)[0]\n\n return train_x[mask, :], train_y[mask, :]", "def remove_zero_bars(dgm):\r\n inds = dgm[:,0] != dgm[:,1]\r\n return dgm[inds,:]" ]
[ "0.68530303", "0.67052513", "0.669365", "0.6676535", "0.66358685", "0.6495949", "0.6456195", "0.64343864", "0.6338271", "0.63302857", "0.62897485", "0.62503976", "0.6189583", "0.61509717", "0.61466753", "0.6088549", "0.6019746", "0.5990693", "0.59600526", "0.5936195", "0.5896028", "0.588317", "0.5881776", "0.5853736", "0.5849152", "0.5827604", "0.58145475", "0.58126885", "0.58126885", "0.5795356", "0.5790728", "0.5775632", "0.5738607", "0.5737763", "0.5735166", "0.571973", "0.570108", "0.56866163", "0.56866163", "0.56866163", "0.56847095", "0.56634754", "0.5658789", "0.5655991", "0.5646318", "0.5644191", "0.56387347", "0.56378037", "0.56355834", "0.5628169", "0.5609957", "0.5604484", "0.5597666", "0.5580226", "0.5556368", "0.5555391", "0.5552145", "0.5552096", "0.5548292", "0.55470806", "0.5521893", "0.5514134", "0.5514113", "0.5507049", "0.5491801", "0.54851043", "0.54566693", "0.5454532", "0.54409236", "0.5435183", "0.5427829", "0.5425768", "0.5421683", "0.54189014", "0.54141086", "0.5410428", "0.5405868", "0.54031485", "0.5386328", "0.5379467", "0.5369935", "0.53657943", "0.5352108", "0.5351949", "0.53510064", "0.5348141", "0.5345933", "0.5337274", "0.5332404", "0.5326364", "0.5308742", "0.5295008", "0.5292991", "0.52909344", "0.5288658", "0.52756405", "0.52721614", "0.5267324", "0.5258949", "0.5255649" ]
0.7522396
0
Drop slice that contains only value from df.
def drop_uniform_slice_from_dataframe(df, value, axis=0): if axis == 0: dropped = (df == value).all(axis=0) if any(dropped): print('Removed {} column index(ices) whose values are all {}.'. format(dropped.sum(), value)) return df.ix[:, ~dropped] elif axis == 1: dropped = (df == value).all(axis=1) if any(dropped): print('Removed {} row index(ices) whose values are all {}.'.format( dropped.sum(), value)) return df.ix[~dropped, :]
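A minimal runnable sketch of drop_uniform_slice_from_dataframe above, assuming pandas 1.0 or later: DataFrame.ix has been removed from pandas, so .loc is substituted here and the progress prints are omitted; both changes are editorial assumptions, not part of the dataset record.

import pandas as pd

def drop_uniform_slice_from_dataframe(df, value, axis=0):
    # Same selection logic as the record above, with the removed .ix
    # indexer replaced by .loc (assumption: pandas >= 1.0).
    if axis == 0:
        uniform = (df == value).all(axis=0)   # columns made up entirely of value
        return df.loc[:, ~uniform]
    elif axis == 1:
        uniform = (df == value).all(axis=1)   # rows made up entirely of value
        return df.loc[~uniform, :]

df = pd.DataFrame({"a": [0, 0, 0], "b": [1, 0, 2]})
print(drop_uniform_slice_from_dataframe(df, 0))   # drops column "a" (all zeros)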
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_values(df, value=0, axis=0):\n \n if axis:\n return df.loc[:, (df != value).any(axis=1-axis)]\n else:\n return df.loc[(df != value).any(axis=1-axis)]", "def remove(df, pattern):\n return df[~df.index.isin(df.query(pattern).index)]", "def drop_transafers(df):\n return df.filter(~(df.valor == 0))", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def remove_rows_without_feature(df, feature):\n return df[np.isfinite(df[feature])]", "def df_cleaner(df):\n return df.dropna()", "def drop_missing_values_in_dataframe(dataframe):\r\n return dataframe.dropna()", "def remove_not_available(df):\n drop_indices = df.index[df['genre'] == 'Not Available'].tolist()\n df = df.drop(drop_indices)\n return df", "def drop_nan(df, perc=20):\n check = check_nan(df, show_plot=False)\n return df.drop(check[check.percentage > perc].index.values, axis=1)", "def df_cleaner(df):\n df = df.dropna()\n return df", "def clean(df):", "def remove_nan(self, dataframe):\n return dataframe.dropna()", "def drop_zero_pay(df):\n df = df.loc[df.payann > 0]\n return df", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def crop_amide_one(df):\n df = df[(df[df.columns[0]] < 1706) & (df[df.columns[0]] > 1599)]\n df.reset_index(drop=True, inplace=True)\n return df", "def get_subtable(df, col, val) -> pd.DataFrame:\r\n return df[df[col] == val].drop(columns=col)", "def drop_illogical(df,var1,var2):\r\n #Mask the illogical entries\r\n mask = df[var1]>df[var2]\r\n #Record the number of entries\r\n NumRecords = df.shape[0]\r\n #drop the illogical entries\r\n df = df[df.keys()][~mask]\r\n #Notify the user how many records were dropped\r\n print('{} records dropped because {} is greater than {}'.format(NumRecords-df.shape[0],var1,var2))\r\n \r\n return df", "def delete_entries(df, column, values):\n for val in values:\n dropindex = df[df[column] == val].index\n df.drop(index = dropindex, inplace = True)", "def remove_missing_values(df, col, exclude):\n if type(col) == 'list':\n try:\n for ind, c in enumerate(col):\n indices = df[df[c] == exclude[ind]].index\n df = df.drop(indices)\n except:\n print('Exception occurred, check kwargs')\n else:\n indices = df[df[col] == exclude].index\n df = df.drop(indices)\n return df", "def removeNonQuant(df, cols):\r\n df = df[~(df[cols].isnull().all(1))]\r\n return df", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def clean_rows_cat_values(df: pd.DataFrame, col: str, values: list) -> pd.DataFrame:\n\n # create mask to filter df with rows that have\n # the indicated values in the indicated column\n index = df.columns.get_loc(col)\n mask = [df.iloc[row, index] not in values for row in range(len(df))]\n\n # print original dataframe shape\n print(f\"Shape of the original dataframe: \\n{df.shape}\\n\")\n\n # filter df\n df = df.iloc[mask]\n df.reset_index(drop=True, inplace=True)\n print(\n f\"Shape after removing rows with values equal to\\n{values}\\nin column '{col}'':\"\n )\n print(df.shape, \"\\n\")\n\n return df", "def deselect (a_data,a_column) :\n loc_data = a_data.drop(a_column,axis = 1) \n return loc_data", "def drop_one_elem_columns(self, df):\n df_ = df.copy()\n\n # Incldue columns in dataframe\n include_idx = []\n for i in df_.columns:\n len_unique = df_[i].dropna().unique().size\n if 
len_unique > 1:\n include_idx.append(i)\n\n df_ = df_[include_idx]\n return df_", "def clean_data(data):\n data.dropna(inplace=True)\n for feature in data:\n if ((feature != 'lat') and (feature != 'long') and (feature != 'date')):\n data.drop(data[(data[feature] < 0)].index, inplace=True)\n data.drop(data[(data['price'] == 0)].index, inplace=True)\n data.drop(data[(data['bedrooms'] == 0) & (data['bathrooms'] == 0.0)].index, inplace=True)\n return data", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def drop_outliers(target_df, settings):\n target_df.sort_values(list(target_df.columns), inplace=True)\n startlen = target_df.shape[0]\n if settings[\"drop_outlier_above\"] < 1:\n target_df = target_df.iloc[: int(np.floor(startlen * settings[\"drop_outlier_above\"])), :]\n if settings[\"drop_outlier_below\"] > 0:\n target_df = target_df.iloc[int(np.floor(startlen * settings[\"drop_outlier_below\"])) :, :]\n return target_df", "def _truncate_games_df(df, season, season_type):\n return df[(df['season'] != season) | (df['type'] != season_type)]", "def remove_players_wo_positions(df):\n df = df[pd.notnull(df['FantPos'])]\n return df", "def dataendclean(df, x, inplace=False):\r\n # Examine Mean Values\r\n if inplace:\r\n df = df\r\n else:\r\n df = df.copy()\r\n\r\n jump = df[abs(df.loc[:, x].diff()) > 1.0]\r\n try:\r\n for i in range(len(jump)):\r\n if jump.index[i] < df.index[50]:\r\n df = df[df.index > jump.index[i]]\r\n printmes(\"Dropped from beginning to \" + str(jump.index[i]))\r\n if jump.index[i] > df.index[-50]:\r\n df = df[df.index < jump.index[i]]\r\n printmes(\"Dropped from end to \" + str(jump.index[i]))\r\n except IndexError:\r\n printmes('No Jumps')\r\n return df", "def select_feats(df):\n cols = list(df)\n for col in cols:\n if col not in config[\"feats\"] and col != \"label\":\n df = df.drop(columns=col)\n return df", "def drop_nonserious_rows(table, column_name):\n # encode table's nan as None\n for column in table.labels:\n encode_nans(table, column)\n full_df = table.to_df()\n start_idx = table.column_index(column_name)\n tbl = table.select(range(start_idx, table.num_columns))\n df = tbl.to_df()\n na_df = df.notna()\n full_df = full_df[np.array(na_df.apply(np.sum, axis=1) != 0)]\n return Table.from_df(full_df)", "def drop_high_nan(df, threshold=0.5):\n n_nans = df.isnull().sum()\n freq_nans = n_nans/float(len(df)) #in percentage\n to_drop = (freq_nans > threshold).values\n columns_drop = df.columns.values[to_drop].tolist()\n return df.drop(columns_drop, axis=1)", "def filter_by(df, constraints):\n indexer = [constraints[name] if name in constraints else slice(None)\n for name in df.index.names]\n return df.loc[tuple(indexer)] if len(df.shape) == 1 else df.loc[tuple(indexer),]", "def deletingNaNs(df):\n # start_ time.time()\n df_old = df.copy()\n df.dropna(axis=1, how='any', inplace=True)\n for key in df_old:\n if str(key) not in df:\n print('Deleted ', key)\n # end_time time.time()\n #print('Time to run deletingNaNs: ', end_time - start_time)\n return df", "def drop_quasi_zero(df, thresh=0.05):\n drop_list = []\n for el in df.columns.values:\n non_zero = df[el][df[el] != 0].shape[0] / df.shape[0]\n if non_zero < thresh:\n drop_list.append(el)\n print('Dropping column: {} | Non-zero values ratio: {}%'.format(\n el, round(100 * non_zero, 3)))\n return df.drop(drop_list, axis=1)", "def drop_indices(self, df) -> None:\n assert self.is_appropriate_data_instance(df)\n # no operation 
needed", "def only_positive_values(df):\n\n\n only_positive_cols_bool = (df <= 0).any()\n only_positive_cols = only_positive_cols_bool[~only_positive_cols_bool].index\n positive_df = df[only_positive_cols]\n\n return positive_df", "def drop_irrelevant_practices(df):\n\n is_relevant = df.groupby(\"practice\").value.any()\n return df[df.practice.isin(is_relevant[is_relevant == True].index)]", "def drop_nans(target_df):\n target_df.dropna(axis=0, inplace=True)\n return target_df", "def remove_rows(df, threshold, log=False):\n if log: section_timer = Timer(log=f\"removing rows with more than {threshold * 100}% of NaNs\")\n\n non_nan_values = int(df.shape[1] * (1 - threshold))\n df_clean = df.dropna(thresh=non_nan_values, axis=0)\n\n if log: section_timer.end_timer(log=f\"removed {df.shape[0] - df_clean.shape[0]} rows\")\n return df_clean", "def remove_filler(dgm, val=np.inf):\r\n inds = (dgm[:,0] != val)\r\n return dgm[inds,:]", "def strip_ds(ds):\n if 'brain' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'brain'), :]\n print('excluded the rest of the brain from the dataset')\n if 'overlap' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'overlap'), :]\n print('excluded overlap from the dataset')\n return ds", "def remove_blank_rows(df):\n clean_df = df.filter(df.columns[1:], axis=1).dropna(how='all', axis=0)\n idx_to_keep = clean_df.index\n clean_df = df.filter(idx_to_keep, axis=0)\n return clean_df", "def remove_outliers(df, var):\n import numpy as np\n \n df = df.copy()\n \n # remove outliers\n Q1 = np.nanquantile(df[var] ,0.25)\n Q3 = np.nanquantile(df[var], 0.75)\n IQR = Q3 - Q1\n \n lower_end = Q1 - 1.5 * IQR \n high_end = Q3 + 1.5 * IQR \n \n df_filtered = df.drop(df[(df[var] < lower_end) | (df[var] > high_end)].index)\n \n return df_filtered", "def trim_df(df, num_insts):\n df2 = df.copy() # the selected field should not appear in the original `df`\n df2['selected'] = False # initialize all instances to not selected\n classes = df2.groupby('class_label') # group instances by class\n trim_part = partial(trim, n=num_insts) # partial trim to n=NUM_INSTS\n df2['selected'] = classes.selected.transform(trim_part) # mark as selected\n selected = df[df2.selected] # get the selected instances\n return selected", "def remove_columns_missing_values(df, min_threshold):\n for col in df.columns:\n rate = sum(df[col].notnull())/float(len(df)) * 100\n if rate <= min_threshold:\n df = df.drop(col,1)\n return df", "def purgeNanEveryWhere(df):\n #Row-wise dropping\n toDrop = np.array([])\n for i in range(df.shape[0]):\n if( np.sum ( pd.isnull(df.iloc[i]) ) == df.shape[1]-1 ):\n toDrop= np.append(toDrop,i)\n df.drop(df.index[toDrop.astype(int)],inplace=True) \n #Column-wise dropping\n for col in df.columns:\n arr = pd.notnull(df[col])\n nnan = np.sum(arr) \n if (nnan == df.shape[1]):\n df.drop(col,inplace=True,axis=1)\n return df", "def drop_transactions(df, station, access_point = None):\n\n if access_point is not None:\n station_name = df.nombreestacion.contains(station)\n entrance = df.nombreaccesoestacion.contains(access_point)\n filters = station_name & entrance\n else:\n filters = df.nombreestacion.contains(station)\n return df.filter(~filters)", "def drop_duplicates(df: pd.DataFrame, subset: list = None) -> pd.DataFrame:\n\n # drop duplicates if there is any\n\n df_sub = df[subset] if subset else df\n if df_sub.duplicated().any():\n df.drop_duplicates(subset=subset, keep=\"last\", inplace=True)\n print(f\"Shape after dropping duplicated rows:\\n{df.shape}\\n\")\n else:\n 
print(\"There isn't duplicated data.\")\n\n return df", "def column_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0] #a solved location in a column\n if location in location_dict.keys():\n \n #ensure that multiple groups can be in multiple locations using period_loc_frequency\n loc_freq = 0\n loc_freq -= 1 #subtract one for the current location usage\n loc_freq += period_loc_frequency[location]\n \n for other_col in column_dict[box]:\n if other_col in solved_values:\n if values[other_col] == location:\n loc_freq -= 1\n \n #make sure that too many locations haven't been used up yet\n if loc_freq < 0:\n print(\"error: too many groups in location\", location)\n \n #if the location is \"used up\", remove it as an option from the rest of the groups\n if loc_freq == 0:\n for other_col in column_dict[box]:\n try:\n values[other_col].remove(location) #remove the location from the other column units\n except:\n pass\n \n return values", "def threshold_col_del(self, threshold):\n self.data = self.data.dropna(thresh=threshold*len(self.data), axis=1) \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]", "def test_drop_zero_variance_on_subset_columns(data):\n step = DropZVColumnsStep(['name', 'released'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' in bdf.columns", "def trim_dataframe(self) -> pd.DataFrame:\n self.remove_below_lower_length_limit()\n self.trim_to_upper_length_limit()\n return self.data", "def test_drop_zero_variance_on_subset_columns_with_zv_removals(data):\n step = DropZVColumnsStep(['released', 'episodes'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' not in bdf.columns", "def drop_rows_with_outliers(df, columns, sigma=3):\n selection = np.full(len(df.index), True, dtype=np.dtype('bool'))\n if not isinstance(columns, list):\n columns = [columns]\n for var in columns:\n std_var = np.std(df[var])\n mean_var = np.mean(df[var])\n in_range = np.logical_and(df[var] > mean_var - sigma*std_var,\n df[var] < mean_var + sigma*std_var)\n selection = np.logical_and(selection, in_range)\n return df[selection]", "def drop_dfcol(self, drop_list):\n self.data = self.df\n for lbl in drop_list:\n self.data = self.data.drop(lbl, axis=1)\n self.n_features = np.shape(self.data)[1]", "def _filter(self, col: str, val: Any) -> pd.DataFrame:\n return self._df[self._df[col] == val]", "def keep_age_range(df, mode):\n # Note: this is a side effect; just the simplest place to remove these\n cols_to_drop = []\n for extra_col in [\"category\", \"colors\", \"patterns\", \"sort_order\"]:\n if extra_col in df.columns:\n cols_to_drop.append(extra_col)\n df = df.drop(columns=cols_to_drop)\n if mode == \"adults\":\n return df[df[\"ageyears\"].between(18, 80, inclusive=\"both\")]\n elif mode == \"pediatrics\":\n return df[df[\"ageyears\"].between(0, 25, inclusive=\"both\")]\n else:\n return df", "def dropna(df, axis=0, th=0.4):\n df = df.copy()\n axis = 0 if axis==1 else 1\n col_idx = df.isna().sum(axis=axis)/df.shape[axis] <= th\n df = df.iloc[:, col_idx.values]\n return df", "def drop_warn_na(df: pd.DataFrame) -> pd.DataFrame:\n\n n_0 = len(df)\n df = df.dropna(how=\"any\")\n\n if len(df) < n_0:\n warnings.warn(\n f\"NA values found in the dataframe,\" f\" {n_0 - len(df)} rows removed.\"\n )\n\n return 
df", "def _remove_non_informative_rows(self, df, threshold):\n df_tmp = pd.DataFrame()\n n_features = len(df.columns)\n # calculating ratio of rows that have more than \"ratio\" missing values\n df_tmp['ratio'] = df.apply(lambda row: row.isnull().sum()/n_features, axis='columns')\n\n # kick too noisy rows\n return df[df_tmp['ratio'] <= threshold]", "def remove_zero_features(df,no_zeros = 1):\n thing = df.astype(bool).sum(axis=0) # number of nonzeros in each column\n idx = pd.Index(thing) #Index format\n location = idx.get_loc(no_zeros) # Set all elements that are 1.0 to True, rest to False.\n loc_of_one = np.asarray(np.nonzero(location)) #Array of columns with only one nonzero element\n loc_of_one = loc_of_one[0]\n df_new = df.drop(df.columns[loc_of_one], axis = 1) # New reduced dataframe\n return df_new", "def df_drop_cols(df, col_keep_ls, inplace=True):\n import pandas\n \n vdf_mem_map = isinstance(df, vaex.hdf5.dataset.Hdf5MemoryMapped)\n vdf_df_arr = isinstance(df, vaex.dataframe.DataFrameArrays)\n\n if (vdf_mem_map) or (vdf_df_arr):\n all_col_names = set(df.column_names)\n elif isinstance(df, pandas.core.frame.DataFrame):\n all_col_names = set(df.columns)\n \n col_keep_set = set(col_keep_ls)\n col_drop_set = all_col_names - col_keep_set\n \n for col in col_drop_set:\n if (vdf_mem_map) or (vdf_df_arr):\n df.drop(col, inplace=inplace)\n elif isinstance(df, pandas.core.frame.DataFrame):\n df.drop(col, axis=1, inplace=inplace)", "def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df", "def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df", "def data_deletion(data, columnX=\"time\", columnY=\"forceX\"):\n\n subset = data_selection(data, columnX, columnY)\n\n data = data.drop(subset.index)\n return data", "def remove_null_cols(df, thresh=0.08):\n \n # look at this\n # df.dropna(thresh=int(df.shape[0] * .9), axis=1)\n pct_null = df.isnull().sum() / len(df)\n missing_features = pct_null[pct_null > thresh].index\n return df.drop(missing_features, axis=1)", "def drop_years(dataframe, start, end):\n tmp = dataframe\n tmp = tmp[(start <= tmp['year'].astype(int)) & (tmp['year'].astype(int) <= end)]\n\n return tmp", "def remove_closed_store(data: pyspark.sql.dataframe.DataFrame) -> pyspark.sql.dataframe.DataFrame:\n closed_store = pd.read_csv('../data/closedStore/Closed stores list.csv')\n closed_store_list = closed_store['Store'].unique()\n \n for store in closed_store_list:\n data = data[data.Store !=store] # remove the colsed store\n return data", "def remove_reserved_keys(df, exclude=[]):\n reserved_keys = __column_intersect(\n df, BAMBOO_RESERVED_KEYS).difference(set(exclude))\n\n return df.drop(reserved_keys, axis=1)", "def remove_data(ds, nh_lim, sh_lim, time_max, lat_name='lat', time_name='time'):\n return xr.where((ds[lat_name] < nh_lim) &\n (ds[lat_name] > sh_lim) &\n (ds[time_name] < pd.to_datetime([time_max]).values),\n np.nan,\n ds)", "def remove(df,column_to_filter,standard_deviations=3):\n import math\n #This function will flatten the row of the dataframe\n def 
flatten_column(row):\n return tuple(float(x) for x in row)\n stats = df.select(column_to_filter).rdd.flatMap(flatten_column).stats()\n mean = stats.mean()\n variance = stats.variance()\n stddev = math.sqrt(variance)\n stddev_threshhold = stddev*standard_deviations\n print(stddev_threshhold)\n from pyspark.sql.functions import lit\n df = df.where(\"abs({column_to_filter} - {mean}) > {stddev_threshhold}\"\\\n .format(column_to_filter=column_to_filter,mean=mean,stddev_threshhold=stddev_threshhold))\n return df", "def drop_columns(df, exclusion):\n for c in df.columns.values:\n if c not in exclusion:\n df.drop(c, axis=1, inplace=True)\n return df", "def cut_frame_tail(df):\n # TODO\n return df", "def dropna(*args):\n # (True|False) mask for each row based on NaN values present in the first dataframe\n mask = ~pd.isnull(args[0])\n if mask.ndim == 2:\n mask = mask.all(1) # in 2d array, keep row only if all values are not NaN\n\n return [x.loc[mask] if isinstance(x, pd.DataFrame) else x[mask] for x in args]", "def clean_data(df):\n \n any_location_id_missing = (df.PULocationID > 263) | (df.DOLocationID > 263)\n df = df.drop(df.index[any_location_id_missing])\n \n df = df[df.tpep_dropoff_datetime > df.tpep_pickup_datetime]\n\n df.PULocationID.replace([104, 105], 103)\n \n return df", "def clip(df, clip_val_low, clip_val_high):\n clipped_df = df.clip(lower=clip_val_low, upper=clip_val_high)\n return clipped_df", "def trim_data(data, attributes):\n return data.drop(attributes, axis=1)", "def get_cols_drop():", "def remove_out_of_bounds(self, data, low_bound, high_bound):\n data = data.dropna()\n data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)] \n return data", "def execute_drop_strategy(dataframe, arg_drop_count):\n\n logging.debug('>>>>>>>>> Using drop rows strategy <<<<<<<<<<<<')\n\n selected_drop_rows = MAX_ROW_TO_SHOW if arg_drop_count is None else arg_drop_count\n\n if selected_drop_rows == 1:\n dataframe = dataframe.drop(dataframe.index[0], inplace=True)\n else:\n dataframe.drop(dataframe.index[0:selected_drop_rows], inplace=True)", "def unmatching_driver_id(df):\n\ttemp = df[df['driver_id_bkg'].notnull()]\n\torder_ids = temp[temp['driver_id_bkg'] != temp['driver_id_pnt']]['order_id'].values\n\treturn df[~df['order_id'].isin(order_ids)]", "def filter_rows_by_non_empty_until(df, max_=MAX_NUM_ROWS):\n print('Starting shape: %s' % str(df.shape))\n threshold = 1\n while df.shape[0] > max_:\n df = filter_rows_by_non_empty(df, threshold=threshold)\n print('THRESHOLDING: to shape: %s' % str(df.shape))\n threshold += 1\n print('Ending shape: %s' % str(df.shape))\n return df", "def remove_outliers(df, std_threshold: float = 3):\n\n df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]\n return df", "def filter_outliers(self, df, outlier):\n return df[~outlier].reset_index(drop=True)", "def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)", "def cleaning_Dataset(dataset):\n cols = dataset.select_dtypes([np.number]).columns\n diff = dataset[cols].diff().sum()\n\n dataset = dataset.drop([diff==0].index, axis=1)\n dataset = dataset.drop('adj close', 1)\n dataset = dataset.fillna(method='bfill')\n dataset = dataset[1:-1]\n return dataset", "def remove_empty_rows(dataframe: pd.DataFrame, column_name: str):\n original_size = len(dataframe)\n dataframe[column_name].replace(\" \", np.nan, inplace=True)\n dataframe[column_name].replace(\"\", np.nan, inplace=True)\n dataframe.dropna(subset=[column_name], inplace=True)\n 
dataframe.reset_index(drop=True, inplace=True)\n new_size = len(dataframe)\n print(f\"A total of {original_size - new_size} rows were dropped\")", "def dropIndices(df, indices):\n df_result = df.copy()\n sorted_indices = list(indices)\n sorted_indices.sort()\n sorted_indices.reverse()\n for idx in sorted_indices:\n df_result = df_result.drop(idx, axis=0)\n return df_result", "def _get_target_only_columns(self, df: DataFrame) -> DataFrame:\n target_table_columns = self.target_table.get_columns()\n \n # if mutation of incoming df is desired, make a deepcopy here\n filtered_df = df\n for column in filtered_df.columns:\n if column not in target_table_columns:\n print(f'dropping unused column \"{column}\"')\n filtered_df = filtered_df.drop(column)\n \n return filtered_df", "def dropna(self, subset, inplace=False):\n if type(subset) is str:\n subset = [subset]\n if not inplace:\n filtered = self._data.dropna(subset=subset)\n return self._copy(filtered)\n else:\n self._data.dropna(subset=subset, inplace=True)", "def _remove_rows(df, num):\n if num == 0:\n return df.copy()\n\n to_remove = np.random.choice(df.index.values, num)\n to_keep = df.index.difference(to_remove)\n\n return df.loc[to_keep]", "def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return", "def test_slice_delslice_forbidden(self):\n global setVal\n class foo:\n def __delslice__(self, i, j, value):\n global setVal\n setVal = i, j, value\n def __delitem__(self, index):\n global setVal\n setVal = index\n\n del foo()[::]\n self.assertEqual(setVal, slice(None, None, None))\n del foo()[::None]\n self.assertEqual(setVal, slice(None, None, None))", "def drop_nan_streaks_above_threshold(df, df_nan_table, thresholds):\n\n # Check for NaN streaks > threshold and drop them from the df\n length = len(df_nan_table['Amount of NaNs'])\n print('df_nan_table length: %s' % length)\n\n indices_to_drop = []\n for i, amount in enumerate(df_nan_table['Amount of NaNs']):\n selected_column = df_nan_table['Column name'][i]\n try:\n if amount > thresholds[selected_column]:\n start_index = (df_nan_table['Start index'][i])\n stop_index = (df_nan_table['Stop index'][i])\n indices = df[start_index:stop_index].index\n print('Enumeration %s of %s | From \\t %s \\t to \\t %s | column %s | NaN streak length: %s'\n % (i, length, start_index, stop_index, selected_column, (len(indices))))\n try:\n indices_to_drop += indices\n except:\n print('Could not add indices to indices_to_drop list')\n else:\n #print('amount < threshold')\n pass\n except:\n #print('No threshold detected for %s' % selected_column)\n pass\n\n print('Dropping NaN streaks > threshold')\n l1 = len(df)\n df = df.drop(indices_to_drop)\n l2 = len(df)\n print('Removed %s rows' % (l1-l2))\n return df", "def dropna(self, number=10000, axis=1, inplace=False):\n length = self.df.shape[axis - 1] # -1 ¿porque era esto?\n df_resultado = self.df.dropna(thresh=length - number, axis=axis)\n valor = self._inplace(\"df\", df_resultado, inplace)\n return valor", "def drop(self,df, column_list):\n df.drop(columns = column_list, inplace = True)\n return df", "def drop_uninformative_columns(df: pd.DataFrame) -> pd.DataFrame:\n for column, value in uninformative_columns(df):\n logger.debug(\n \"Dropping column %r from DataFrame (every value %s %r)\",\n column,\n \"is\" if isinstance(value, float) and np.isnan(value) else \"=\",\n value,\n )\n df = df.drop(column, axis=\"columns\")\n return df", "def filter_rows_by_non_empty(df, threshold=1):\n # Boolean 
DataFrame where `True` means the cell value is non-zero.\n non_zeros = df.applymap(lambda cell: cell != 0)\n\n # Boolean Series where `True` means the row has enough non-zeros.\n enough_non_zeros = non_zeros.apply(\n # Check that the row contains `True`, meaning it has a non-zero.\n # check that the row has enough non-zeros, i.e. more than the threshold.\n lambda row: True in row.value_counts() and row.value_counts()[True] > threshold,\n axis=1\n )\n result = df[enough_non_zeros]\n if df.shape != result.shape:\n print('THRESHOLDING: filter_rows_by_non_empty')\n return result" ]
[ "0.6749588", "0.659988", "0.6581077", "0.64866114", "0.64584786", "0.64093405", "0.62364835", "0.6224", "0.6223723", "0.6217032", "0.6179184", "0.6157227", "0.61414427", "0.6132971", "0.6130289", "0.6113223", "0.610663", "0.60877687", "0.60699916", "0.6052917", "0.6052265", "0.60467917", "0.59864885", "0.5985572", "0.59468275", "0.59339845", "0.5929148", "0.59261584", "0.588431", "0.58636737", "0.5837513", "0.5812451", "0.58101314", "0.57569736", "0.5755521", "0.5747306", "0.5738144", "0.572803", "0.57136714", "0.5706878", "0.56925833", "0.5689834", "0.56853783", "0.5678557", "0.56490296", "0.56452996", "0.5616968", "0.56152743", "0.5606004", "0.5591796", "0.55664617", "0.55656683", "0.5563449", "0.556325", "0.5560019", "0.5559101", "0.5547681", "0.55458194", "0.55325055", "0.5503676", "0.5502719", "0.54997826", "0.54980236", "0.54862976", "0.54781306", "0.54781306", "0.54775935", "0.5475052", "0.54735565", "0.5466329", "0.54351026", "0.54286504", "0.54230624", "0.54088897", "0.53955704", "0.53895134", "0.5374752", "0.53718793", "0.53711104", "0.53663254", "0.5359297", "0.53468704", "0.53467435", "0.5344104", "0.5328832", "0.5326686", "0.5325504", "0.53213257", "0.53143305", "0.5310752", "0.5297523", "0.5284338", "0.5284165", "0.52834475", "0.5279437", "0.5275972", "0.5275282", "0.52612334", "0.52529466", "0.52526575" ]
0.75481135
0
Split df into n_split blocks (by row).
def split_dataframe(df, n_split, axis=0): # TODO: implement axis logic if df.shape[0] < n_split: raise ValueError( 'n_split ({}) can\'t be greater than the number of rows ({}).'. format(n_split, df.shape[0])) elif n_split <= 0: raise ValueError('n_split ({}) can\'t be less than 0.'.format(n_split)) n = df.shape[0] // n_split splits = [] for i in range(n_split): start_i = i * n end_i = (i + 1) * n splits.append(df.iloc[start_i:end_i, :]) i = n * n_split if i < df.shape[0]: splits.append(df.ix[i:]) return splits
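A minimal runnable sketch of split_dataframe above, under the same pandas 1.0+ assumption: the trailing df.ix[i:] is replaced by df.iloc[i:] and the input validation is condensed; those substitutions are mine, not part of the record.

import pandas as pd

def split_dataframe(df, n_split):
    # Row-wise blocking as in the record above; .iloc replaces the removed .ix tail.
    if not 0 < n_split <= df.shape[0]:
        raise ValueError("n_split must be between 1 and the number of rows")
    n = df.shape[0] // n_split
    splits = [df.iloc[i * n:(i + 1) * n, :] for i in range(n_split)]
    if n * n_split < df.shape[0]:
        splits.append(df.iloc[n * n_split:])   # leftover rows form one extra block
    return splits

df = pd.DataFrame({"x": range(7)})
print([len(s) for s in split_dataframe(df, 3)])   # [2, 2, 2, 1]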
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_df(df, n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks", "def split_dataframe(df, size=10*1024*1024):\n \n # size of each row\n row_size = df.memory_usage().sum() / len(df)\n # maximum number of rows in each segment\n row_limit = int(size // row_size)\n # number of segments\n seg_num = (len(df)+row_limit-1)//row_limit\n # split df into segments\n segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]\n\n return segments", "def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test", "def getKSplits(df, n_splits, seed = None):\n\n result = []\n\n # None random seed is same as not setting it\n df_shuffled = df.sample(len(df), random_state = seed)\n\n fold_size = int(len(df) / n_splits)\n\n for i in range(n_splits):\n if i == n_splits - 1: # last iteration\n df_fold = df_shuffled[fold_size * (i): len(df)] # gets remainder\n else:\n df_fold = df_shuffled[fold_size * (i):fold_size * (i + 1) ] # python starts indexing at 0\n result.append(df_fold)\n\n return result", "def dataFrameSplit(df, norec=1000000, outfile= None):\n # calculation of the no. of rows of the dataframe\n df_rsz = len(df.index)\n if df_rsz>norec:\n no_splits = np.ceil(df_rsz/norec)\n dfarr = np.array_split(df,no_splits)\n return dfarr\n else:\n print(\"The dataframe doesn't have sufficient records\")\n \n # printing to disk when \n if outfile!=None:\n i=0\n for arr in dfarr:\n arr.to_csv(\"D:\\\\ddf\"+str(i+1)+\".csv\",encoding='utf-8', index=False,\n header=False)\n i = i+1", "def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe", "def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))", "def prepare_stops_to_request(df: pd.DataFrame) -> list:\n return [split_df(df, i, i + 100) for i in range(0, len(df), 100)]", "def split_df(df,\n test_size=.10,\n random_state=42):\n train_df, test_df = train_test_split(df,\n test_size=test_size,\n random_state=random_state)\n return train_df, test_df", "def splitInBlocks (l, n):\n k = len(l) / n\n r = len(l) % n\n\n i = 0\n blocks = []\n while i < len(l):\n if len(blocks)<r:\n blocks.append(l[i:i+k+1])\n i += k+1\n else:\n blocks.append(l[i:i+k])\n i += k\n\n return blocks", "def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)", "def split_into_chunks(x, n):\n csize = int(np.ceil(len(x) / n))\n out = list()\n \n i = 0\n while i * csize < len(x):\n out.append(x[(i * csize):(i * csize + csize)])\n i += 1\n\n return out", "def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits", "def split_dataset(df, predict_window):\n\n #split dataset into train and test datasets\n #train 80 percent of rows\n dataset_train = np.array(df[:int(df.shape[0]*0.8)])\n\n #test dataset is 
20 percent of rows\n #50 - that's where historical data and prediction overlap\n dataset_test = np.array(df[int(df.shape[0]*0.8)- predict_window:])\n\n return dataset_train, dataset_test", "def get_n_splits(self):\n pass", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def getSplits(df, train_size, val_size, test_size, seed=None):\n size = len(df)\n\n # size is considered a percentage if less than 1:\n train_size = int(train_size * size) if train_size < 1 else train_size\n val_size = int(val_size * size) if val_size < 1 else val_size\n test_size = int(test_size * size) if test_size < 1 else test_size\n\n if not seed is None:\n np.random.seed(seed)\n\n train_val_idx = np.random.choice(\n a=range(size),\n size=train_size + val_size,\n replace=False\n )\n train_idx = train_val_idx[:train_size]\n val_idx = train_val_idx[train_size:]\n\n train = df.iloc[train_idx]\n val = df.iloc[val_idx]\n test = df.drop(train.index).drop(val.index) # test is equal to the leftover\n\n assert len(train) + len(val) + len(test) == len(df)\n\n return train, val, test", "def split_data(df: pd.DataFrame, ratio: float, purging: bool = True, n_bars: int = 10) -> Tuple[pd.DataFrame, pd.DataFrame]:\n split_idx = int(df.shape[0] * ratio)\n df1 = df[:split_idx]\n df2 = df[split_idx:]\n if purging:\n purge_idx = round((n_bars-1) * ratio)\n df1 = df1[:-purge_idx]\n df2 = df2[(n_bars - 1 - purge_idx):]\n\n return df1, df2", "def split_on_whole_table(\n df: pyspark.DataFrame,\n ) -> pyspark.DataFrame:\n return df", "def getWindows(df, size=75, step=15):\n start = 0\n while start+size < df.count():\n yield start, start + size #pd.to_timedelta(size, unit='m'))\n start += step", "def add_split_col ( self, arr ):\n arr = arr if not self.shuffle else np.random.permutation ( arr )\n n = len ( arr )\n k = int ( np.ceil ( n / self.number_of_folds ) )\n return pd.DataFrame (\n { \"index\": arr, \"split\": np.tile ( np.arange ( self.number_of_folds ), k )[ 0:n ] , }\n )", "def split_data(df: pd.DataFrame):\n size = int(df.shape[0] * 0.8)\n indexes = np.random.choice(df.index, size, replace=False)\n train_set = df.loc[indexes]\n test_set = df.loc[~df.index.isin(indexes)]\n return train_set, test_set", 
"def test_03_dataframe_to_dataframe_w_chunksize(self):\n _, err = _iquery(\"store(flatten(DF1, cells_per_chunk:5), DF3)\")\n assert not err, err\n self._array_cleanups.append('DF3')\n check_v_sum('DF3')\n nchunks = chunk_count(vaid_of('DF3'))\n prt(\"DF3 has\", nchunks, \"chunks\")\n assert nchunks < self._df1_chunks, \"DF3 did not get dense!\"", "def train_test_split(df, test_size=0.3):\r\n # split df here\r\n train_size = int(df.shape[0] * (1 - test_size))\r\n test_size = df.shape[0] - train_size\r\n train = df[:train_size]\r\n test = df[train_size:]\r\n\r\n return train, test # return the train and test datasets\r", "def split_data(df):\n\trandom_seed = 1\n\tdf_train = df.sample(frac=0.8, random_state=random_seed)\n\tdf_rem = df.loc[~df.index.isin(df_train.index)]\n\tdf_valid = df_rem.sample(frac=0.5, random_state=random_seed)\n\tdf_test = df_rem.loc[~df_rem.index.isin(df_valid.index)]\n\tlogger.info(\"Shape of training dataframe: \" + str(df_train.shape))\n\tlogger.info(\"Shape of validation dataframe: \" + str(df_valid.shape))\n\tlogger.info(\"Sahpe of test dataframe: \" + str(df_test.shape))\n\n\treturn df_train, df_valid, df_test", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def split_data(input_df, output_df):\n return train_test_split(input_df, output_df, test_size=0.2, random_state=42,\n stratify=output_df)", "def df_split_random(df, N, random_seed=None):\n random = 
np.random.RandomState(random_seed)\n\n all_local_indices = np.arange(len(df))\n shuffled = random.permutation(all_local_indices)\n\n df1 = df.iloc[shuffled[:N]]\n df2 = df.iloc[shuffled[N:]]\n return df1, df2", "def split_time_series_by_time_steps_index(df, n_time_steps=None):\n\t\n\ttime_steps = df.index.get_level_values(1).unique()\n\tsplit_time_step = time_steps[-n_time_steps]\n\ttrain = df[df.index.get_level_values(1) < split_time_step]\n\ttest = df[df.index.get_level_values(1) >= split_time_step]\n\t\n\treturn train, test", "def split(self, params):\n\n if \"train_df\" in params.keys():\n self.df = params[\"train_df\"]\n if \"test_df\" in params.keys():\n self.df = pd.concat([self.df, params[\"test_df\"]])\n if \"n_splits\" in params.keys():\n self.n_splits = params[\"n_splits\"]\n if \"shuffle\" in params.keys():\n self.shuffle = params[\"shuffle\"]\n if \"random_state\" in params.keys():\n self.random_state = params[\"random_state\"]\n\n self.__validate_input()\n\n n_samples = num_of_samples(self.df)\n\n if self.n_splits > n_samples:\n raise ValueError(\n f\"Cannot have number of splits {self.n_splits} > number of\"\n f\" samples {n_samples}\"\n )\n\n indices = np.arange(n_samples)\n for test_indices in self.__iter_test_indices(n_samples):\n train_indices = indices[np.logical_not(test_indices)]\n test_indices = indices[test_indices]\n yield train_indices, test_indices", "def train_test_split(df):\n training_size = int(len(df) * .67)\n test_size = int(len(df) - training_size)\n train, test = df[0:training_size], df[training_size:len(df)]\n return train, test", "def split_data(num_samples, num_splits):\n\n kf = sklearn.model_selection.KFold(n_splits=num_splits, random_state=0);\n return kf.split(range(num_samples))", "def split_to_batches(iterable, n=1):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]", "def _partitionize(df, settings, grids, frag):\n column = settings['feature']\n if len(df) > 0:\n init, end, end2 = grids\n tmp = df.apply(lambda row: _inblock(row, column, init, end), axis=1)\n tmp = df.loc[tmp]\n\n if len(frag) > 0:\n frag = pd.concat([frag, tmp])\n else:\n frag = tmp\n return frag", "def sample_rows(df, nrows):", "def train_test_split(df, frac):\n frac = round(len(df)*frac)\n train = df[:frac]\n test = df[frac:]\n\n return train, test", "def split_data(df, train_prop):\n # Create random Tensors to hold inputs and outputs, and wrap them in Variables\n train_df = df.sample(frac=train_prop)\n test_df = df.loc[~df.index.isin(train_df.index)]\n return train_df, test_df", "def _split(self, c, n):\n\tsubsets = []\n\tstart = 0\n\tfor i in range(n):\n\t subset = c[start:start + (len(c) - start) / (n - i)]\n\t subsets.append(subset)\n\t start = start + len(subset)\n\treturn subsets", "def chunks(l, n):\n for i in range(0, n):\n yield l[i::n]", "def _chunks(l, ncols):\n assert isinstance(ncols, int), \"ncols must be an integer\"\n for i in range(0, len(l), ncols):\n yield l[i: i+ncols]", "def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def train_test_split_drifters():\n df = process_raw_df()\n ids = np.unique(df.index.get_level_values(level=0))\n rng = np.random.default_rng(seed=1)\n train_ids = np.sort(rng.choice(ids, size=len(ids)//2, replace=False))\n test_ids = np.sort(np.setdiff1d(ids, train_ids))\n train_df = df[df.index.get_level_values(level=0).isin(train_ids)].copy()\n test_df = df[df.index.get_level_values(level=0).isin(test_ids)].copy()\n return train_df, test_df", "def df_division(data, col_name, 
n_group=5, ascending=False):\n assert col_name in data.columns, '{} is not in columns of data!'.format(col_name)\n assert data[col_name].dtype == 'float' or data[col_name].dtype == 'int', \\\n 'type of {} is not comparable!'.format(col_name)\n\n data.reset_index(drop=True, inplace=True)\n rows = data.shape[0]\n rows_each_group = rows // n_group\n data.sort_values(by=col_name, ascending=ascending, inplace=True)\n data.reset_index(drop=True, inplace=True)\n\n division = []\n for i in range(n_group):\n if not i == n_group-1:\n division.append(data.iloc[i * rows_each_group: (i+1) * rows_each_group, :])\n else:\n division.append(data.iloc[i * rows_each_group:, :])\n\n return division", "def train_test_split(df, random_state=42):\n if len(df) < 3:\n print('no bueno')\n train, test = train_test_split(df, test_size=.2, random_state=random_state)\n train, val = train_test_split(train, test_size=.2, random_state=random_state)\n return train, test, val", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def split_dataframe(df:\"pandas.DataFrame, pandas.Series\", sections:\"int\"=5, drop_index:\"bool\"=True, output:\"str\"=\"dataframe\")-> \"None or pandas.DataFrame\":\n import numpy as np\n from IPython.display import display_html\n \n if sections <= 0:\n raise ValueError('number sections must be larger than 0.')\n \n ### Find out how to keep column names when dropindex=True\n ### if series, dont allow drop index?\n \n ### allow passing in of desired column names as an array of strings (will result\n ### in dup col names but it won't matter if its only being displayed and not used in calculations)\n\n if isinstance(df, pandas.Series):\n df = df.to_frame()\n \n if drop_index:\n df.reset_index(drop = True, inplace=True)\n else:\n df.reset_index(level=0, inplace=True)\n\n df_split = np.array_split(df, sections)\n num_rows = [column.shape[0] for column in df_split]\n \n if output == \"dataframe\":\n \n alldata = [column.values.tolist() for column in df_split]\n \n # Add empty rows to each DataFrame until all DataFrames have the same number of rows\n for i in range(len(alldata)):\n while len(alldata[i]) < max(num_rows):\n alldata[i].append([\"\"]*df.shape[1])\n\n # Create rows of values across all of the DataFrames in alldata\n # When each entire row is created, add it to the output DataFrame\n dataframe = [] # <-- Output DataFrame\n for row_index in range(max(num_rows)):\n across_row = []\n for dataf in alldata:\n across_row.extend(dataf[row_index])\n dataframe.extend([across_row])\n \n return pandas.DataFrame(data=dataframe)\n \n if output == \"html\":\n strHtml = ''\n for x in split_dataframe:\n strHtml += x.to_html()\n display_html(strHtml.replace('table','table style=\"display:inline\"'), raw=True)", "def _chunks(l, n):\n\tfor i in range(0, len(l), n):\n\t\tyield l[i:i + n]", "def _shuffle(df, indices, chunksize):\n i = 0\n partition = []\n while len(indices) > chunksize:\n oids = df.reindex(indices[:chunksize])\n partition.append(oids)\n indices = indices[chunksize:]\n i += 1\n else:\n oids = df.reindex(indices)\n partition.append(oids)\n return partition", "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def split_dataframe(df, split_elements_list):\n y = df.filter(split_elements_list)\n x = df.drop(split_elements_list, axis=1)\n\n return x, y", "def chunks(data, n):\n newn = int(len(data) / n) # chunk size \n \n for i in range(0, n-1):\n test_chunk = data[i*newn:i*newn+newn]\n train_chunk = [el for el in data if el not in test_chunk]\n yield 
train_chunk, test_chunk\n \n test_chunk = data[n*newn-newn:]\n train_chunk = [el for el in data if el not in test_chunk]\n \n yield train_chunk, test_chunk", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def get_chunks(self, l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def split(features, groundtruths, n_split):\n\n if n_split == 1:\n return features, groundtruths\n\n tags = list(set(groundtruths))\n new_index = {}\n for tag in tags:\n new_index[tag] = []\n for index, gt in enumerate(groundtruths):\n new_index[gt].append(index)\n new_feats = []\n new_gts = []\n for i in range(0, n_split):\n indexes = []\n for tag in tags:\n ref = len(new_index[tag])/n_split\n indexes.append(new_index[tag][ref*i:ref*(i+1)])\n \"\"\"\n ..todo:: manage multiple tags!\n \"\"\"\n indexes = indexes[0] + indexes[1]\n # print(features[:5])\n # print(len(indexes))\n # print(len(indexes[0]))\n # print(len(indexes[1]))\n # sys.exit()\n indexes.sort()\n new_gts.append([groundtruths[j] for j in indexes])\n new_feats.append([features[j] for j in indexes])\n return new_feats, new_gts", "def split_on_divided_integer(\n df, column_name: str, divisor: int, batch_identifiers: dict\n ):\n matching_divisor = batch_identifiers[column_name]\n res = (\n df.withColumn(\n \"div_temp\",\n (F.col(column_name) / divisor).cast(pyspark.types.IntegerType()),\n )\n .filter(F.col(\"div_temp\") == matching_divisor)\n .drop(\"div_temp\")\n )\n return res", "def get_n_splits(self):\n return self.n_splits", "def get_n_splits(self):\n return self.n_splits", "def get_n_splits(self):\n return self.n_splits", "def _chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def split(a, n):\n k, m = divmod(len(a), n)\n ret = [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)]\n return ret", "def split_into_frames(num_frames, data):\n inds = collections.defaultdict(list)\n for i, frame_id in enumerate(data[:, FRAME_ID_COLUMN]):\n inds[frame_id].append(i)\n return [data[inds[t + 1], :] for t in range(num_frames)]", "def train_test_split(df, test_size=0.1):\n ntrn = int(round(len(df) * (1 - test_size)))\n\n X_train, y_train = _load_data(df.iloc[0:ntrn])\n X_test, y_test = _load_data(df.iloc[ntrn:])\n\n return (X_train, y_train), (X_test, y_test)", "def split_data(df, split_method='fo', test_size=.2, random_state=42):\n if split_method == 'fo':\n train_set, test_set = _split_fo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'tfo':\n train_set, test_set = _split_tfo(df, test_size=test_size)\n elif split_method == 'ufo':\n train_set, test_set = _split_ufo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'utfo':\n train_set, test_set = _split_utfo(df, test_size=test_size)\n else:\n raise HuitreError('Invalid data_split value, expect: ufo, utfo')\n train_set = train_set.reset_index(drop=True)\n test_set = test_set.reset_index(drop=True)\n return train_set, test_set", "def single_cv(df, n_splits=3, val_size=2, seed=0xBadCafe):\n folds = sorted(df.fold.unique())\n split = []\n for f in folds:\n idx_b = df[df.fold == f].index.tolist()\n cv_b = train_val_test_split(idx_b, val_size=val_size, n_splits=n_splits, random_state=seed)\n for cv in cv_b:\n split.append(cv)\n return split", "def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]", "def chunks(l, n):\n lists = []\n for 
i in range(n):\n list1 = np.arange( i*l/n+1 , (i+1)*l/n+1 )\n lists.append(list1)\n return lists", "def chunks(parts, n):\n for i in range(0, len(parts), n):\n yield parts[i:i+n]", "def _get_splits(self, n_examples, seed):\n\n if seed is not None:\n rng = default_rng(seed)\n else:\n rng = default_rng()\n\n data_rows = list(range(n_examples))\n rng.shuffle(data_rows)\n\n split_rows = [data_rows[pair[0] : pair[1]]\n for pair in self.split_indices]\n\n return split_rows", "def _get_chunks(l, n = 10):\n \n for i in range(0, len(l), n): yield l[i: i + n]", "def split ( self, y, X = None ):\n # Make sure y is an array\n y = np.array ( y ) if isinstance ( y, list ) else y\n\n # Groupby y and add integer indices.\n df_with_split = (\n pd.DataFrame ( { \"y\": y, \"index\": np.arange ( len ( y ) ) } )\n .groupby ( \"y\" ) [ \"index\" ]\n .apply ( self.add_split_col ) # Add col for split for instance\n )\n\n # For each fold, get train and test indices (based on col for split)\n for cv_split in np.arange ( self.number_of_folds - 1, -1, -1 ):\n train_bool = df_with_split [ \"split\" ] != cv_split\n test_bool = ~ train_bool\n # Yield index values of not cv_split and cv_split for train, test\n yield df_with_split [ \"index\" ].values [ train_bool.values ], df_with_split [\n \"index\"\n ].values [ test_bool.values ]\n # End split()", "def _chunk(self, l, n):\n for i in range(0, len(l) + 1, n):\n yield l[i:i + n]", "def __init__(self, df, first_n=100, last_n=5, min_len=200):\n games = list(df.groupby('game_id'))\n self.data = []\n for g_id, g in games:\n if g.shape[0] >= min_len:\n self.data.append(g)\n\n self.length = len(self.data)\n self.first_n = first_n\n self.last_n = last_n", "def fold(nb_splits, dataset):\r\n index = np.arange(np.shape(dataset)[0])\r\n splits = np.split(index, nb_splits)\r\n\r\n index = []\r\n\r\n for n_fold in np.arange(nb_splits):\r\n index.append((splits[n_fold].tolist(),(np.concatenate([x for i,x in enumerate(splits) if i!=n_fold])).tolist()))\r\n\r\n return index", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(self, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def split_set(dataframe, test_size):\n i = np.floor(len(dataframe)*test_size).astype(int)\n set_a = dataframe[0:i].reset_index()\n set_b = dataframe[i:].reset_index()\n return set_a, set_b", "def divide_chunks(a_list, n):\n return [a_list[i:i + n] for i in range(0, len(a_list), n)]", "def chunks(self, l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n): # noqa: E741\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def build_chunks(l, n):\r\n for i in xrange(0, len(l), n):\r\n yield l[i:i+n]", "def get_n_splits(self):\n return self.n_folds", "def split_population_wise(self, n:int, remove_day4: bool=True):\n X_test = []\n X_train = [] \n y_test = [] \n y_train = [] \n \n # Splitting\n dfs = deepcopy(self.dataframes)\n shuffle(dfs)\n df_test = dfs[:n]\n df_train = dfs[n:]\n\n for df in df_train:\n X, x, Y, y = self.__split_df(df, 0.0, remove_day4, False)\n X_train = X_train + X\n y_train = y_train + Y\n\n 
for df in df_test:\n X, x, Y, y = self.__split_df(df, 0.0, remove_day4, False)\n X_test = X_test + X\n y_test = y_test + Y\n\n self.X_train = np.asarray(X_train)\n self.X_test = np.asarray(X_test)\n self.y_train = np.asarray(y_train)\n self.y_test = np.asarray(y_test)", "def create_split_bounds(N, train_pct):\n train_len = int(round(train_pct * N))\n if ((N - train_len) % 2) != 0:\n train_len += 1\n\n # NOTE: We're assume the dev and test set are equal in length.\n test_len = dev_len = int((N - train_len) / 2)\n\n assert \"Not all data points are being used. Check create_split_bounds()\", \\\n (train_len + test_len + dev_len) == N\n\n return train_len, dev_len, test_len", "def split_data(x, y, ratio, index=None):\n m = x.shape[0]\n splitter = np.cumsum(ratio)\n train_start = 0\n val_start = batch_size * ((splitter[0] * m) // batch_size)\n test_start = batch_size * ((splitter[1] * m) // batch_size)\n test_end = batch_size * ((splitter[2] * m) // batch_size)\n\n val_start = int(val_start)\n test_start = int(test_start)\n test_end = int(test_end)\n\n if index is not None:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n index[train_start:val_start],\n x[val_start:test_start, :], y[val_start:test_start, :],\n index[val_start:test_start],\n x[test_start:test_end, :], y[test_start:test_end, :],\n index[test_start:test_end]\n )\n\n\n\n else:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n x[val_start:test_start, :], y[val_start:test_start, :],\n x[test_start:test_end, :], y[test_start:test_end, :]\n )\n\n return split", "def _split_flattened(data, split_ratio, seed=default.DEFAULT_CV_RANDOM_SEED):\n\n check.argument_numeric(split_ratio, 0, 1)\n\n pc = np.sum(data.values != 0)\n gs_count = int(split_ratio * pc)\n idx = _make_shuffled_index(pc, seed=seed)\n\n pr_idx = data.values[data.values != 0].copy()\n gs_idx = data.values[data.values != 0].copy()\n\n pr_idx[idx[0:gs_count]] = 0\n gs_idx[idx[gs_count:]] = 0\n\n gs = data.values.copy()\n pr = data.values.copy()\n\n gs[gs != 0] = gs_idx\n pr[pr != 0] = pr_idx\n\n priors_data = pd.DataFrame(pr, index=data.index, columns=data.columns)\n gold_standard = pd.DataFrame(gs, index=data.index, columns=data.columns)\n\n return priors_data, gold_standard", "def split_data(dataset, ratio = 0.9):\n cutoff_row = int(dataset.shape[0] * ratio)\n return (dataset[:cutoff_row], dataset[cutoff_row:])", "def chunks(cls, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def DivideDF(df_all):\n return df_all.iloc[:df_all.trn_len], df_all.iloc[df_all.trn_len:]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))", "def chunk(self, shape, split) -> NotImplementedError:\n raise NotImplementedError()", "def SetNbSplitPoints(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ClosedFaceDivide_SetNbSplitPoints(self, *args)", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]" ]
[ "0.80189484", "0.7166912", "0.68179005", "0.67057437", "0.6623575", "0.6451587", "0.6366774", "0.6241725", "0.61946136", "0.6189358", "0.61609524", "0.6113555", "0.6094658", "0.6066726", "0.6060644", "0.6053875", "0.60479224", "0.5984508", "0.59791416", "0.59433573", "0.59427154", "0.5933734", "0.5932551", "0.5930089", "0.59288275", "0.592263", "0.5899354", "0.5891334", "0.5874397", "0.5872169", "0.58683056", "0.5862896", "0.58213085", "0.5813502", "0.5812785", "0.5811433", "0.5770038", "0.57667905", "0.5744971", "0.5744932", "0.57423353", "0.5737778", "0.57233757", "0.57104194", "0.57036275", "0.5700768", "0.56910026", "0.569086", "0.56703085", "0.56689066", "0.56652945", "0.5664826", "0.5633128", "0.56102186", "0.5607914", "0.55847335", "0.55847335", "0.55847335", "0.5582229", "0.5581303", "0.5573154", "0.55643624", "0.5563879", "0.5560077", "0.5547464", "0.55474144", "0.5545349", "0.55441463", "0.55409783", "0.5533239", "0.5531409", "0.55267704", "0.55259717", "0.5515115", "0.5514496", "0.55135787", "0.5512386", "0.55098236", "0.55068445", "0.55068445", "0.55068445", "0.55027366", "0.55021614", "0.55021614", "0.5498617", "0.54980034", "0.5497742", "0.54907775", "0.5485654", "0.54842633", "0.5479204", "0.5470343", "0.5469427", "0.5463157", "0.5462072", "0.5460356", "0.54589415", "0.5458106", "0.5456702", "0.5452123" ]
0.7756274
1
Normalize a DataFrame or Series.
def normalize_2d_or_1d(a, method, axis=None, rank_scale=10000, normalizing_mean=None, normalizing_std=None, normalizing_min=None, normalizing_max=None, normalizing_size=None): if rank(a) == 1: n_a = normalize_1d( a, method, rank_scale=rank_scale, normalizing_mean=normalizing_mean, normalizing_std=normalizing_std, normalizing_min=normalizing_min, normalizing_max=normalizing_max, normalizing_size=normalizing_size) if isinstance(a, Series): return Series(n_a, index=a.index) else: return n_a elif rank(a) == 2: if isinstance(a, DataFrame): if axis == 0 or axis == 1: return a.apply( normalize_1d, **{ 'method': method, 'rank_scale': rank_scale, 'normalizing_mean': normalizing_mean, 'normalizing_std': normalizing_std, 'normalizing_min': normalizing_min, 'normalizing_max': normalizing_max, 'normalizing_size': normalizing_size }, axis=axis) else: # Get normalizing size if normalizing_size is not None: size = normalizing_size else: size = a.values.size if method == '-0-': # Get normalizing mean if normalizing_mean is not None: mean = normalizing_mean else: mean = a.values.mean() # Get normalizing STD if normalizing_std is not None: std = normalizing_std else: std = a.values.std() # Normalize if std == 0: print( 'Not \'0-1\' normalizing (std = 0), but \'/ size\' normalizing ...' ) return a / size else: return (a - mean) / std elif method == '0-1': # Get normalizing min if normalizing_min is not None: min_ = normalizing_min else: min_ = a.values.min() # Get normalizing max if normalizing_max is not None: max_ = normalizing_max else: max_ = a.values.max() # Normalize if max_ - min_ == 0: print( 'Not \'0-1\' normalizing (max - min = 0), but \'/ size\' normalizing ...' ) return a / size else: return (a - min_) / (max_ - min_) elif method == 'rank': raise ValueError( 'Normalizing combination of \'rank\' & axis=\'all\' has not been implemented yet.' ) else: raise ValueError('Can\'t normalize >2 dimensional array-like.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self, df):\n return df / df.ix[0, :]", "def normalize_data(df):\n return df / df.ix[0,:]", "def normalize_data(df):\n return df / df.ix[0,:]", "def normalize_data(df):\r\n return df/df.ix[0,:]", "def normalize(self, df):\n return (df - df.mean()) / (df.max() - df.min())", "def normalize(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_NORM) )\n\n ret_df = df.copy()\n t = ret_df[comm_keys]\n ret_df[comm_keys] = (t - t.mean()) / t.std()\n\n return ret_df", "def df_normalizer(df):\n df = tf.keras.utils.normalize(df, axis=1)\n\n return df", "def normalize_series(series):\n return (series - series.mean()) / (series.max() - series.min())", "def normalize_data(data_frame):\n min_max_scaler = preprocessing.MinMaxScaler()\n x_scaled = min_max_scaler.fit_transform(data_frame)\n return pd.DataFrame(x_scaled)", "def normalize_feature(df):\n return df.apply(lambda column: (column - column.mean()) / column.std())", "def normalize_data(self, df):\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n result[feature_name] = (\n df[feature_name] - min_value) / (max_value - min_value)\n return result", "def normalise_series(to_normalise: pd.Series) -> pd.Series:\n \n # return (to_normalise - to_normalise.mean()) / to_normalise.std() # 0 mean and unit standard deviation\n return to_normalise / to_normalise.std() # positive and unit standard deviation", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def normalize(df, mean=None, std=None):\n if mean is None:\n mean = df.mean(0)\n if std is None:\n std = df.std(0)\n\n # ensure we don't divide by zero in columns with zero std (all entries identical)\n try:\n # if df was a 1d array or pd.Series to begin with, std will be a\n # non-subscriptable float, so we handle that case in except\n std[std == 0] = 1\n except TypeError:\n std = std if std > 0 else 1\n\n # return mean and std to be able to revert normalization later\n return (df - mean) / std, [mean, std]", "def normalize(col):\n maximum=df[col].max()\n minimum=df[col].min()\n for index,row in df.iterrows():\n df.ix[index,col]=(row[col]-minimum)/(maximum-minimum)", "def normalize_price_values(df):\r\n\r\n\tdf_normalize_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tcol_array = np.array(df[col])\r\n\t\tdf_normalize_dict[\"Normalized\" + col] = preprocessing.normalize([col_array])[0]\r\n\r\n\tdf_normalize = pd.DataFrame(df_normalize_dict, index=df.index)\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_normalize", "def normalise(da):\n return (da - da.min()) / (da.max() - da.min())", "def normalize(self):\n self._data /= self.norm()", "def _normalize(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n if \"norm\" in ds:\n norm = ds.norm\n else:\n norm = ds.data.mean(dim=dim)\n norm.attrs[\"_group_apply_reshape\"] = True\n\n return xr.Dataset(\n dict(data=apply_correction(ds.data, invert(norm, kind), kind), norm=norm)\n )", "def normalize(df, excludes):\n\n result = df.copy()\n for feature_name in df.columns:\n if feature_name in excludes:\n continue\n try:\n max_value = df[feature_name].max()\n min_value = df[feature_name].min()\n if max_value == min_value:\n min_value = 0\n result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)\n result[feature_name] = result[feature_name].apply(lambda x: 
round(abs(x), 4))\n except:\n LOGGER.error(f'Error normalizing feature: {feature_name}')\n raise RuntimeError(f'Error normalizing feature: {feature_name}')\n return result", "def df_norm(df):\n return (df - df.mean()) / (df.max() - df.min())", "def scalarNormalizer(df):\r\n arr=dict()\r\n for col in CONT_FEATURES_COL_TO_USE:\r\n mean, std =df[col].mean(), df[col].std()\r\n df[col]=df[col].apply(lambda x: (x-mean)/std)\r\n arr[col] = [mean, std]\r\n json.dump(arr, open('normalize.json', 'w'))\r\n return df", "def normalize(\n ds: xr.Dataset,\n *,\n dim: str,\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n\n if \"norm\" in ds:\n norm = invert(ds.norm, kind)\n else:\n norm = invert(ds.data.mean(dim=dim), kind)\n\n return xr.Dataset(dict(data=apply_correction(ds.data, norm, kind)))", "def normalize_features(df):\r\n mu = df.mean()\r\n sigma = df.std()\r\n \r\n if (sigma == 0).any():\r\n raise Exception(\"One or more features had the same value for all samples, and thus could \" + \\\r\n \"not be normalized. Please do not include features with only a single value \" + \\\r\n \"in your model.\")\r\n df_normalized = (df - df.mean()) / df.std()\r\n\r\n return df_normalized, mu, sigma", "def normalize_features(df):\r\n mu = df.mean()\r\n sigma = df.std()\r\n \r\n if (sigma == 0).any():\r\n raise Exception(\"One or more features had the same value for all samples, and thus could \" + \\\r\n \"not be normalized. Please do not include features with only a single value \" + \\\r\n \"in your model.\")\r\n df_normalized = (df - df.mean()) / df.std()\r\n\r\n return df_normalized, mu, sigma", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def normalize_features(df):\n mu = df.mean()\n sigma = df.std()\n\n if (sigma == 0).any():\n raise Exception(\"One or more features had the same value for all samples, and thus could \" +\n \"not be normalized. 
Please do not include features with only a single value \" +\n \"in your model.\")\n df_normalized = (df - df.mean()) / df.std()\n\n return df_normalized, mu, sigma", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def normalize_features(dataframe):\n print(\"Normalizing feature matrix...\")\n tmp = dataframe\n feats = tmp.drop(columns=['year', 'county'])\n fmax = feats.max()\n fmin = feats.min() \n # normalize the feature matrix\n feats = (feats - fmin) / (fmax - fmin)\n tmp[feats.columns] = feats\n\n return tmp", "def norm_data(df):\n cols = df.columns\n sum = df.sum(axis=1)\n df_new = df.loc[:,cols[1]:cols[-1]].div(sum, axis=0)\n return df_new", "def normalise(raw_data, normalise_by_column=False):\n data = raw_data\n if normalise_by_column:\n #normaliza valores usando o maximo de cada coluna\n col_maxes = raw_data.max(axis=0)\n #divide cada valor pelo maximo correspondente de cada coluna\n data = raw_data / col_maxes[np.newaxis,:] \n else:\n #divide todos os valores pelo maximo do dataset (tudo na mesma escala)\n data = raw_data / raw_data.max()\n\n return data", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def de_normalize_data(self, df):\n if len(df) == 0:\n return df\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.permitted_range[feature_name][1]\n min_value = self.permitted_range[feature_name][0]\n result[feature_name] = (\n df[feature_name]*(max_value - min_value)) + min_value\n return result", "def normalize_column(data: DataFrame, column: str):\n m = mean(data[column])\n s = sd(data[column])\n return data[column].map(lambda x: (x - m) / s)", "def normalize(merged_table):\r\n #add normalization\r\n # Minimum\r\n min_val = merged_table['covid_cases'].min()\r\n\r\n # Maximum\r\n max_val = merged_table['covid_cases'].max()\r\n\r\n # Calculate a normalized column\r\n normalized = (merged_table['covid_cases'] - min_val) / (max_val - min_val)\r\n\r\n # Add to the dataframe\r\n merged_table['n_covid'] = normalized\r\n return merged_table", "def normalize_dataset(self):", "def Rescaling_Normalization(data: pd.DataFrame) -> pd.DataFrame:\n\n labels = data.pop('Labels')\n\n norm_data = (data - data.min()) / (data.max() - data.min())\n\n norm_data['Labels'] = labels\n\n return norm_data", "def specific_normalization(df):\n # Need to scale some vars. 
This is done using a StandardScaler from sklearn package\n scaler = StandardScaler()\n df['Pclass'] = df['Pclass'].astype('float64')\n df['Family'] = df['Family'].astype('float64')\n # .reshape(-1, 1) is mandatory otherwise an exception is thrown (as 'data has a single feature')\n df['Pclass'] = scaler.fit_transform(df['Pclass'].values.reshape(-1, 1))\n df['Family'] = scaler.fit_transform(df['Family'].values.reshape(-1, 1))\n\n return df", "def normalize_ds(dataset):\n dataset = copy.copy(dataset)\n\n dim_dataset = dataset.shape\n\n for n_row in range(dim_dataset[0]):\n k = dataset[n_row,:]\n k_norm =(k - np.min(k))/(np.max(k) - np.min(k))\n dataset[n_row,:] = k_norm\n\n return dataset", "def normalise_min_max(df):\n return (df - df.min()) / (df.max() - df.min())", "def normalise_min_max(df):\n return (df - df.min()) / (df.max() - df.min())", "def normalize(self, asOf=None, multiplier=100):\n if not asOf:\n x0 = self.iloc[0]\n else:\n x0 = self.loc[asOf]\n return self / x0 * multiplier", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - ds_mean) / ds_std\n return array", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def normalize (a_data,a_column,b_method='MinMax') :\n if b_method == 'MinMax' :\n loc_scaler = __minmax()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])\n elif b_method == 'Standard' :\n loc_scaler = __standard()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])", "def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total", "def normalize_X(X):\n scaler = preprocessing.StandardScaler()\n X = scaler.fit_transform(X)\n return X", "def normalize(ref_df_col, df_col):\n\n col_mean = ref_df_col.mean()\n col_std = ref_df_col.std()\n\n ref_df_norm_col = (ref_df_col - col_mean) / col_std\n df_norm_col = (df_col - col_mean) / col_std\n\n return ref_df_norm_col, df_norm_col", "def normalize(row):\n study = row['study']\n val = row[key]\n group_mean = df.groupby('study').mean().loc[study,key]\n group_std = df.groupby('study').std().loc[study,key]\n zval = (val - group_mean) / group_std\n return zval", "def normalize(self, X):\n return X - X.mean()", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)", "def standardized(df, df_level=False):\n def _normalize_column(column, mean=None, std=None):\n def _normalize_value(x, mean, std):\n return (x - mean) / std\n\n mean = mean if mean else DFMatrixUtils.get_mean_value(column)\n std = std if std else DFMatrixUtils.get_std_value(column)\n normalized = [_normalize_value(x, mean, std) for x in column]\n 
return normalized\n\n df_mean = None\n df_std = None\n if df_level:\n df_mean = DFMatrixUtils.get_mean_value(df)\n df_std = DFMatrixUtils.get_std_value(df)\n\n df = df.apply(lambda column: _normalize_column(column,\n mean=df_mean,\n std=df_std), axis=0)\n return df", "def normalise(x):\n x = np.copy(x)\n n_cols = x.shape[1]\n for col_index in range(n_cols):\n col = x[:, col_index]\n factor = np.max(col)\n x[:, col_index] = col / factor\n\n return x", "def normalize(data, name):\n cols = list(data.columns)\n vals = data.values\n for i in range(len(vals)):\n v = vals[i]\n l = np.sum(v[0:len(v)-1])\n if l != 0:\n t = v[0:len(v)-1]/l\n v = np.append(t, v[-1])\n vals[i] = v\n write_data(vals, cols, name)", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n norm2 = np.linalg.norm(x,2,axis = 1).reshape(x.shape[0],-1)\n x = x/norm2\n ### END YOUR CODE\n\n return x", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(x):\n\n return (x - x.values.min()) / (x.values.max() - x.values.min())", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data", "def normalize(x):\r\n return x/norm(x)", "def normalize(dframe):\n # result={}\n # for i in range(len(dframe.columns)):\n # m = mean[dframe.columns[i]]\n # s = std[dframe.columns[i]]\n # x = dframe[dframe.columns[i]]\n # x = 0.5*(np.tanh(0.1*(x-m)/s)+1)\n # dframe[dframe.columns[i]] = x\n\n for column in dframe:\n \tmean = dframe[column].mean()\n \tstd = dframe[column].std()\n \tdframe[column] = 0.5*(np.tanh(0.1*(dframe[column]-mean)/std)+1)", "def normalize(X, norm=..., *, axis=..., copy=..., return_norm=...):\n ...", "def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std", "def normalize_inputs(df, metrics):\n for m in metrics:\n mean = np.mean(df[m])\n stdev = np.std(df[m])\n def std_normalize(x):\n return (x - mean) / stdev\n #df[m] = df[m].map(std_normalize)\n xmin = min(df[m])\n xmax = max(df[m])\n def minmax_normalize(x):\n return (x - xmin) / (xmax - xmin)\n df[m] = df[m].map(minmax_normalize)\n return df", "def normalize(s, lo_pctl=0.01, hi_pctl=.99):\n\n data_types = {pd.core.series.Series: (lambda x: x.values),\n np.ndarray: (lambda x: x), \n list: (lambda x: np.array(x))}\n\n this_type = type(s)\n assert this_type in data_types.keys(), 'invalid data type. Enter numpy array, pandas series , or list of float.'\n \n for b in [lo_pctl, hi_pctl]:\n assert (b >= 0) & (b <= 1), 'invalid winsor bound. 
Value must be fraction: > 0 and < 1.'\n assert lo_pctl < hi_pctl, 'invalid winsor bound. First item '\n\n y = data_types[type(s)](s)\n z = np.empty(y.shape)\n z[:] = np.nan\n\n # Compute mean and stdev excluding outliers defined by lo and hi_pctl\n if len(y) > 1:\n upper_bound = np.nanquantile(y, hi_pctl)\n lower_bound = np.nanquantile(y, lo_pctl)\n with np.errstate(invalid='ignore'): # ignore stupid warning about 'invalid value encountered in less than'\n mu = np.nanmean(y[(y >= lower_bound) & (y <= upper_bound)])\n sigma = np.nanstd(y[(y >= lower_bound) & (y <= upper_bound)])\n\n if sigma == 0:\n sigma = np.nan\n\n # Compute normalized variable\n with np.errstate(invalid='ignore'):\n y[y < lower_bound] = lower_bound\n y[y > upper_bound] = upper_bound\n z = (y - mu) / sigma\n\n return z", "def normalized(self):\n v = self.copy()\n v.normalize()\n return v", "def normalize(data):\n min = np.min(data)\n if min:\n data = data + min\n return old_div(data,np.max(data))\n else: # if min is 0\n return old_div(data,np.max(data))", "def normalize(self, attr_name): # DONE\n self.data[attr_name] = (self.data[attr_name] - self.data[attr_name].mean()) / self.data[attr_name].std()", "def fit_and_transform(self, df, method='min_max_scaling',\n per_col_scaler=False):\n self.fit(df, method, per_col_scaler)\n normalized_df = self.transform(df)\n return normalized_df", "def normalize(self, factor):", "def normalise_zero_base(df):\n return df / df.iloc[0] - 1", "def normalise_zero_base(df):\n return df / df.iloc[0] - 1", "def normalize(A: np.array) -> np.array:\n for i in range(A.shape[1]):\n A[:, i] = (A[:, i] - np.min(A[:, i])) / (np.max(A[:, i] - np.min(A[:, i])))\n return A", "def dt_normalize(self):\n return DateTimeDefault.register(pandas.Series.dt.normalize)(self)", "def normalize(adata, log_transform=True, **kwargs):\n sc.pp.normalize_total(adata, **kwargs)\n if log_transform:\n sc.pp.log1p(adata)\n\n return adata", "def normalize(data):\n\n p_means = np.mean(data,axis=0)\n p_vars = np.var(data,axis=0)\n\n # subtract dc component\n data = data-p_means\n\n # contrast normalize \n data = data/np.sqrt(p_vars+10) # plus 10 to account for small variances\n \n return data", "def norm(df,varname):\n return df[varname] / df[varname].max()", "def _normalize(array):\n\treturn (array - np.min(array))/(np.max(array)-np.min(array))", "def normalise_percents(x: np.ndarray or pd.Series, should_impute=False) -> np.ndarray or pd.Series:\n # NOTE Currently python doesn't allow you to write this as 0 <= x <= 1\n assert (0 <= x[~np.isnan(x)]).all() and (x[~np.isnan(x)] <= 1).all(), 'Values must be [0, 1]'\n mu = np.nanmean(x)\n if should_impute:\n x[np.isnan(x)] = mu\n return (x - mu) / np.sqrt(mu * (1 - mu))", "def normalize(feats, train_nid, dtype=np.float32):\n train_feats = feats[train_nid]\n scaler = preprocessing.StandardScaler()\n scaler.fit(train_feats)\n feats = scaler.transform(feats)\n return feats.astype(dtype)", "def normalize_column(df_column, center_at_zero=False):\n\n normalized_array = np.array(df_column, dtype=\"float64\")\n amax, amin = np.max(normalized_array), np.min(normalized_array)\n normalized_array -= amin\n if center_at_zero:\n normalized_array *= 2.0 / (amax - amin)\n normalized_array -= 1.0\n else:\n normalized_array *= 1.0 / (amax - amin)\n return normalized_array", "def normalize_multivariate_data(data, scaling_values=None):\n normed_data = np.zeros(data.shape, dtype=data.dtype)\n scale_cols = [\"mean\", \"std\"]\n if scaling_values is None:\n scaling_values = 
pd.DataFrame(np.zeros((data.shape[-1], len(scale_cols)), dtype=np.float32),\n columns=scale_cols)\n for i in range(data.shape[-1]):\n scaling_values.loc[i, [\"mean\", \"std\"]] = [data[:, :, :, i].mean(), data[:, :, :, i].std()]\n normed_data[:, :, :, i] = (data[:, :, :, i] - scaling_values.loc[i, \"mean\"]) / scaling_values.loc[i, \"std\"]\n return normed_data, scaling_values", "def normalize_multivariate_data(data, scaling_values=None):\n normed_data = np.zeros(data.shape, dtype=data.dtype)\n scale_cols = [\"mean\", \"std\"]\n if scaling_values is None:\n scaling_values = pd.DataFrame(np.zeros((data.shape[-1], len(scale_cols)), dtype=np.float32),\n columns=scale_cols)\n for i in range(data.shape[-1]):\n scaling_values.loc[i, [\"mean\", \"std\"]] = [data[:, :, :, i].mean(), data[:, :, :, i].std()]\n normed_data[:, :, :, i] = (data[:, :, :, i] - scaling_values.loc[i, \"mean\"]) / scaling_values.loc[i, \"std\"]\n return normed_data, scaling_values", "def normalize_values(values: ArrayLike, norm: str | float | bool = True) -> np.ndarray:\n values = np.asarray(values)\n assert norm\n\n if isinstance(norm, str):\n if norm == \"first\":\n divisor = values[0]\n elif norm == \"max\":\n divisor = max(values)\n else:\n raise ValueError(f\"Invalid normalization, got {norm=}\")\n else:\n divisor = float(norm)\n\n return values / divisor", "def normalize_columns(df, colnames):\r\n for col in colnames:\r\n s = df[col]\r\n df[col] = s.sub(s.min()).div((s.max() - s.min()))\r\n print(f'''Normalized Columns: {colnames}''')\r\n\r\n return df", "def normalize(self):\n if self.normed:\n return\n self._normalize()", "def normalize_data(self, df: pd.DataFrame, leak_id: int = None) -> pd.DataFrame:\n # replace NaN with None\n return df.where(pd.notnull(df), None)", "def normalizeRows(x):\n N = x.shape[0]\n x /= np.sqrt(np.sum(x ** 2, axis=1)).reshape((N, 1)) + 1e-30\n return x", "def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))", "def centerMeanAndNormalize(df):\n return minMax(df - df.mean(axis=0))" ]
[ "0.80967456", "0.80151314", "0.80151314", "0.79464513", "0.78324926", "0.7578621", "0.75722766", "0.74043113", "0.7327699", "0.72870654", "0.7236674", "0.71289355", "0.70915204", "0.6990485", "0.69761825", "0.6927697", "0.6883224", "0.68788195", "0.6830347", "0.6810123", "0.6795891", "0.6786825", "0.67341304", "0.67298615", "0.67298615", "0.6728012", "0.6728012", "0.67272794", "0.67176044", "0.67032605", "0.66992646", "0.66951954", "0.66937304", "0.66546494", "0.6654383", "0.6640611", "0.662895", "0.6607412", "0.6606554", "0.65755415", "0.65635574", "0.65588635", "0.65353686", "0.653375", "0.652691", "0.652691", "0.650599", "0.6505959", "0.64753616", "0.6473293", "0.64682597", "0.64545524", "0.64521796", "0.6448925", "0.64480686", "0.64166206", "0.6398718", "0.6374951", "0.63672113", "0.63662124", "0.63658404", "0.63625056", "0.6320858", "0.6317875", "0.6288602", "0.6288602", "0.6277991", "0.62695694", "0.62556094", "0.62376535", "0.62374794", "0.62374663", "0.6236959", "0.6233646", "0.62322074", "0.619517", "0.61892873", "0.6186356", "0.6185701", "0.618486", "0.61749274", "0.61749274", "0.61740404", "0.6174017", "0.61563456", "0.61469954", "0.614675", "0.6136702", "0.61130095", "0.6110807", "0.61084276", "0.60877943", "0.60877943", "0.60744375", "0.60721517", "0.60693467", "0.6067491", "0.60668546", "0.6063977", "0.60634756" ]
0.65927386
39
Decorator to register a babel cli handler.
def babel_cli_handler(**options): def decorator(cls): """ decorates the given class and registers an instance of it into available babel cli handlers. :param BabelCLIHandlerBase cls: babel cli handler class. :returns: babel cli handler class. :rtype: BabelCLIHandlerBase """ instance = cls() babel_services.register_cli_handler(instance, **options) return cls return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decorator(cls):\n\n instance = cls()\n babel_services.register_cli_handler(instance, **options)\n\n return cls", "def __init__(self):\n\n super().__init__(BabelCLIHandlersEnum.INIT)", "def add_cmd_handler(self, cmd, func):\n len_args = len(inspect.getargspec(func)[0])\n def add_meta(f):\n def decorator(*args, **kwargs):\n f(*args, **kwargs)\n decorator.bytes_needed = len_args - 1 # exclude self\n decorator.__name__ = f.__name__\n return decorator\n func = add_meta(func)\n self._command_handlers[cmd] = func", "def register_command(*parse_args, **options):\n def wrapper(function):\n function._is_command = True\n return function\n return wrapper", "def __init__(self):\n\n super().__init__(BabelCLIHandlersEnum.COMPILE)", "def cli(_):\n pass", "def cli(_):\n pass", "def register_cli_commands(app):\n app.cli.add_command(init_events_command)", "def register_command(name):\n\n def register(cmd):\n Facade().register_command(name, cmd)\n return cmd\n\n return register", "def command(*args, **kwargs):\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n new_func = CommandParent(func, **kwargs)\r\n _HANDLER.commands[new_func.name] = new_func\r\n return new_func\r\n return decorator", "def make_command_register(collector):\n\n def _register(*args, name=None):\n a_transform = _transform(*args)\n return collector.register(transform=a_transform, name=name)\n\n return _register", "def register_command(self, func):\n self.commands[func.__name__] = func", "def __call__(self, path):\n def wrapper(application):\n self.register(path, application)\n return application\n return wrapper", "def add_handler(self, handler):\n self.register(abcs.AHandler, handler, handler)", "def __init__(self, *args, **kwargs):\n\n super().__init__()\n\n # a dictionary containing cli handlers for different commands.\n # in the form of: {str handler_name: CLIHandlerBase handler}\n self._cli_handlers = DTO()", "def register_command(func):\n supported_commands.append(func.__name__)\n return func", "def _cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def add_run_hook(h):\n add_hook(run, h)", "def register(self, regex, auth=False, help=\"\"):\n self.regexes.append(re.compile(regex))\n self.helps.append(help)\n self.auths.append(auth)\n\n def _decorator(func):\n self.funcs.append(func)\n return func\n \n return _decorator", "def route(self, command):\n\n def _route(func):\n self._command_hash_views[command] = func\n\n def __route(*args, **kwargs):\n return func(*args, **kwargs)\n\n return __route\n\n return _route", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def cli():\r\n pass", "def command(login_required=True):\n def decorate(f):\n def wrapper(self, 
*args):\n try:\n return f(self, *args)\n except ApiError as e:\n log_exception(e)\n raise BackendException('dpbx api error \"%s\"' % (e,))\n except Exception as e:\n log_exception(e)\n log.Error('dpbx code error \"%s\"' % (e,), log.ErrorCode.backend_code_error)\n raise\n\n wrapper.__doc__ = f.__doc__\n return wrapper\n return decorate", "def decorator_register(func, name=None):\n self.register_command(func, name, description, show_if, args_opts)\n\n def func_wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return func_wrapper", "def cli():\n\n pass", "def entry_point():", "def entry_point():", "def entry_point():", "def entry_point():\n pass", "def subcommand(args=None, parent=subparsers):\n def decorator(func):\n parser = parent.add_parser(func.__name__, description=func.__doc__)\n for arg in args:\n parser.add_argument(*arg[0], **arg[1])\n parser.set_defaults(func=func)\n\n if args is None:\n args = []\n return decorator", "def cli():\n ...", "def add_handler(self, handler):\n pass", "def cli() -> None:\n pass # pragma: no cover", "def _command(self, *cmd, handler=None):", "def cli(**_) -> None:\n pass", "def register(self):\n self.app.bind('CreateSuperUserCommand', CreateSuperUser())\n self.app.bind('InstallCommand', Install())\n self.app.bind(\n 'AdminUserMigrationDirectory',\n os.path.join(package_directory, 'migrations')\n )", "def make_new_handler(self, *args, **kwargs):", "def cli():\n pass", "def monkeypatch_spawn():\n ccompiler.CCompiler.spawn = _commandfile_spawn", "def main_cli():\n pass", "def cmd(**overrides):\n def wrap(cb):\n cb.cli2 = overrides\n return cb\n return wrap", "def command_(self, name):\n def decorator(func):\n func.__name__ = name\n return self.command(func)\n return decorator", "def command(cls, name=None):\n postfix = name\n def decorator(method):\n if postfix is None:\n name = method.__name__\n else:\n name = postfix\n mod = method.__module__\n if mod.startswith('scripts.commands'):\n mod = mod[len('scripts.commands'):]\n mod = mod.lstrip('.')\n if mod == '__main__':\n full_name = name\n else:\n full_name = mod+'.'+name\n\n app = cls\n subcmds = cls.subcommands\n for sub in full_name.split('.')[:-1]:\n if sub not in subcmds:\n sub_app = type(sub+'App', (cli.Application,),{})\n sub_app = app.subcommand(sub)(sub_app)\n subcmds[sub] = (sub_app, {})\n else:\n pass\n\n app, subcmds = subcmds[sub]\n app.__doc__ = importlib.import_module(method.__module__).__doc__\n\n signature = inspect.signature(method)\n arguments = []\n for (arg_name, param) in signature.parameters.items():\n tp = param.annotation\n if isinstance(tp, Option) or isinstance(tp, Flag):\n if tp._names:\n names = tp._names\n else:\n names = ['-'+arg_name[0], '--'+arg_name]\n arguments.append([tp, arg_name, names, param.default, tp._doc])\n\n def main(self, *args):\n kw_args = {}\n for tp, name, _, _, _ in arguments:\n kw_args[name] = getattr(self, name)\n method(*args, **kw_args)\n\n newclass = type(name+'App', (cli.Application,), {\"main\": main})\n newclass.__doc__ = method.__doc__\n newclass = app.subcommand(name)(newclass)\n\n for tp, name, names, default, doc in arguments:\n if isinstance(tp, Option):\n setattr(newclass, name, cli.SwitchAttr(names, default=default, help=doc))\n elif isinstance(tp, Flag):\n setattr(newclass, name, cli.Flag(names, help=doc))\n return method\n\n return decorator", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def register_cli_handler(self, instance, 
**options):\n\n if not isinstance(instance, self._cli_handler_type):\n raise InvalidCLIHandlerTypeError('Input parameter [{instance}] is '\n 'not an instance of [{handler}].'\n .format(instance=instance,\n handler=self._cli_handler_type))\n\n if instance.get_name() in self._cli_handlers:\n old_instance = self._cli_handlers.get(instance.get_name())\n replace = options.get('replace', False)\n if replace is not True:\n raise DuplicatedCLIHandlerError('There is another registered '\n 'cli handler with name [{name}] '\n 'but \"replace\" option is not set, so '\n 'cli handler [{instance}] could not '\n 'be registered.'\n .format(name=instance.get_name(),\n instance=instance))\n\n print_warning('CLI handler [{old_instance}] is going '\n 'to be replaced by [{new_instance}].'\n .format(old_instance=old_instance,\n new_instance=instance))\n\n self._cli_handlers[instance.get_name()] = instance", "def add(self, method: str, pattern: str, handler: Callable) -> None:", "def cli(ctx):\n pass", "def cli(ctx):\n pass", "def cli():\n return", "def register_cli(cls):\n for cmd in cls.SUB_GROUP_COMMANDS:\n getattr(cls, cls.SUB_GROUP_NAME).add_command(getattr(cls, cmd))", "def command(\n self,\n name: str,\n aliases: list[str] | None = None,\n *,\n subtype: str | None = None,\n short_help: str | None = None,\n help: str | None = None,\n use_shlex: bool = True,\n ) -> DECORATOR_TYPE:\n\n def decorator(target: DECORATOR_ARGS_TYPE) -> Handler:\n handler = get_handler(target)\n\n self.apps.append(\n App(\n \"message\",\n subtype,\n handler,\n name=name,\n aliases=aliases,\n short_help=short_help,\n help=help,\n is_command=True,\n use_shlex=use_shlex,\n ),\n )\n\n return handler\n\n return decorator", "def on_start(self):\n\n def decorator(coro):\n self._hooks.append((\"start\", coro))\n return coro\n\n return decorator", "def self_decorator(self, func):\n # TODO: Any other ways to pass variables to handlers?\n def command_func(update, context, *args, **kwargs):\n return func(self, update, context, *args, **kwargs)\n return command_func", "def chat_handler(self, regex, order=100):\n def decorator(func):\n self.register_handler(regex, func, order)\n return func\n\n return decorator", "def subcommand(wrapped):\n def callback(scanner, name, ob):\n scanner.subcommands[ob.name] = ob\n venusian.attach(wrapped, callback, category='subcommands')\n return wrapped", "def command_entry_point(function):\n\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n \"\"\" Do housekeeping tasks and execute the wrapped method. 
\"\"\"\n\n try:\n logging.basicConfig(format='%(name)s: %(message)s',\n level=logging.WARNING,\n stream=sys.stdout)\n # This hack to get the executable name as %(name).\n logging.getLogger().name = os.path.basename(sys.argv[0])\n return function(*args, **kwargs)\n except KeyboardInterrupt:\n logging.warning('Keyboard interrupt')\n return 130 # Signal received exit code for bash.\n except Exception:\n logging.exception('Internal error.')\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.error(\"Please report this bug and attach the output \"\n \"to the bug report\")\n else:\n logging.error(\"Please run this command again and turn on \"\n \"verbose mode (add '-vvvv' as argument).\")\n return 64 # Some non used exit code for internal errors.\n finally:\n logging.shutdown()\n\n return wrapper", "def wrapper(callback):\n self.commands[name] = SlashCommand(callback, name, description, options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)", "def register(self, command: str, handler: Any):\n\n if not command.startswith(\"/\"):\n command = f\"/{command}\"\n\n LOG.info(\"Registering %s to %s\", command, handler)\n self._routes[command].append(handler)", "def command(func: 'function') -> 'function':\n func._decorators = (Bot.command,)\n return func" ]
[ "0.83057785", "0.6101431", "0.6031415", "0.6014547", "0.5841859", "0.5641326", "0.5641326", "0.5525839", "0.55132365", "0.5494176", "0.5488199", "0.54627776", "0.5449929", "0.5416219", "0.53628474", "0.5356179", "0.5328154", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5316033", "0.5312644", "0.52962273", "0.52676064", "0.5236449", "0.5232205", "0.52177274", "0.52085227", "0.5200188", "0.5195558", "0.5195558", "0.5195558", "0.51949954", "0.5188947", "0.51846474", "0.51690775", "0.51647705", "0.51628035", "0.5155644", "0.51530844", "0.5145106", "0.51438683", "0.5141145", "0.5132986", "0.51111513", "0.51046234", "0.50968677", "0.50924695", "0.50924695", "0.50924695", "0.50924695", "0.50924695", "0.5082679", "0.507682", "0.50760835", "0.50760835", "0.507407", "0.5067242", "0.50671375", "0.50639033", "0.5062794", "0.5056813", "0.50511634", "0.50438875", "0.5042665", "0.5042483", "0.5029142" ]
0.8217108
1
Decorates the given class and registers an instance of it into available babel cli handlers.
def decorator(cls): instance = cls() babel_services.register_cli_handler(instance, **options) return cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def babel_cli_handler(**options):\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available babel cli handlers.\n\n :param BabelCLIHandlerBase cls: babel cli handler class.\n\n :returns: babel cli handler class.\n :rtype: BabelCLIHandlerBase\n \"\"\"\n\n instance = cls()\n babel_services.register_cli_handler(instance, **options)\n\n return cls\n\n return decorator", "def add_class(self, cls):\n self.commands.append(cls)", "def _class_wrapper(command_class):\n WebBot().register_command(command_class)\n return command_class", "def classmethod(self, encoding):\n # Add encodings for hidden self and cmd arguments.\n encoding = ensure_bytes(encoding)\n typecodes = parse_type_encoding(encoding)\n typecodes.insert(1, b'@:')\n encoding = b''.join(typecodes)\n\n def decorator(f):\n def objc_class_method(objc_cls, objc_cmd, *args):\n py_cls = ObjCClass(objc_cls)\n py_cls.objc_cmd = objc_cmd\n args = convert_method_arguments(encoding, args)\n result = f(py_cls, *args)\n if isinstance(result, ObjCClass):\n result = result.ptr.value\n elif isinstance(result, ObjCInstance):\n result = result.ptr.value\n return result\n name = f.__name__.replace('_', ':')\n self.add_class_method(objc_class_method, name, encoding)\n return objc_class_method\n return decorator", "def register(cls):\n register(cls, cls.provided_class)", "def register(cls, class_):\n cls._registered[class_.tag()] = class_", "def register(cls, class_to_register):\n cls.registered_loaders.append(class_to_register)\n return class_to_register", "def _class(self, _class):\n\n self.__class = _class", "def _class(self, _class):\n\n self.__class = _class", "def __call__ (self, cls):\n # Define a wrapper function to capture the actual instantiation and __init__ params\n @wraps(cls)\n def wrapper_f(*args, **kwargs):\n #print(f'type of cls is {type(cls)}')\n peripheral = self.peripheral_type(**self.kwargs)\n o = cls(*args, **kwargs)\n o.message_debug(f\"Decorating class {cls.__name__} with {self.peripheral_type.__name__}\")\n o.attach_sensor(peripheral)\n return o\n return wrapper_f", "def register(cls: Any, *args: Any, **kwargs: Any) -> Callable:\n\n def wrapper(subclass: Any, *args: Any, **kwargs: Any) -> Any:\n return subclass\n\n return wrapper", "def _decorate(cls):\n global_validators = [session_required, catch_typeerror]\n # Cheat methods _hosts_name_label\n # -------------\n # Methods that have a trivial implementation for all classes.\n # 1. 
get_by_uuid == getting by ref, so just return uuid for\n # all get_by_uuid() methods.\n \n for api_cls in classes.keys():\n # We'll let the autoplug classes implement these functions\n # themselves - its much cleaner to do it in the base class\n \n get_by_uuid = '%s_get_by_uuid' % api_cls\n get_uuid = '%s_get_uuid' % api_cls\n get_all_records = '%s_get_all_records' % api_cls \n\n def _get_by_uuid(_1, _2, ref):\n return xen_api_success(ref)\n\n def _get_uuid(_1, _2, ref):\n return xen_api_success(ref)\n\n def unpack(v):\n return v.get('Value')\n\n def _get_all_records(_api_cls):\n return lambda s, session: \\\n xen_api_success(dict([(ref, unpack(getattr(cls, '%s_get_record' % _api_cls)(s, session, ref)))\\\n for ref in unpack(getattr(cls, '%s_get_all' % _api_cls)(s, session))]))\n\n setattr(cls, get_by_uuid, _get_by_uuid)\n setattr(cls, get_uuid, _get_uuid)\n setattr(cls, get_all_records, _get_all_records(api_cls))\n\n # Autoplugging classes\n # --------------------\n # These have all of their methods grabbed out from the implementation\n # class, and wrapped up to be compatible with the Xen-API.\n\n# def getter(ref, type):\n# return XendAPIStore.get(ref, type)\n\n def wrap_method(name, new_f):\n try:\n f = getattr(cls, name)\n wrapped_f = (lambda * args: new_f(f, *args))\n wrapped_f.api = f.api\n wrapped_f.async = f.async\n setattr(cls, name, wrapped_f)\n except AttributeError:\n # Logged below (API call: %s not found)\n pass\n\n\n def setter_event_wrapper(api_cls, attr_name):\n setter_name = '%s_set_%s' % (api_cls, attr_name)\n wrap_method(\n setter_name,\n lambda setter, s, session, ref, *args:\n _setter_event_dispatch(s, setter, api_cls, attr_name,\n session, ref, args))\n\n\n def ctor_event_wrapper(api_cls):\n ctor_name = '%s_create' % api_cls\n wrap_method(\n ctor_name,\n lambda ctor, s, session, *args:\n _ctor_event_dispatch(s, ctor, api_cls, session, args))\n\n\n def dtor_event_wrapper(api_cls):\n dtor_name = '%s_destroy' % api_cls\n wrap_method(\n dtor_name,\n lambda dtor, s, session, ref, *args:\n _dtor_event_dispatch(s, dtor, api_cls, session, ref, args))\n\n\n # Wrapping validators around XMLRPC calls\n # ---------------------------------------\n for api_cls, validator in classes.items():\n def doit(n, takes_instance, async_support=False,\n return_type=None):\n n_ = n.replace('.', '_')\n try:\n f = getattr(cls, n_)\n if n not in argcounts:\n argcounts[n] = f.func_code.co_argcount - 1\n \n validators = takes_instance and validator and \\\n [validator] or []\n \n validators += global_validators\n for v in validators:\n f = v(f)\n f.api = n\n f.async = async_support\n if return_type:\n f.return_type = return_type\n \n setattr(cls, n_, f)\n except AttributeError:\n log.warn(\"API call: %s not found\" % n)\n\n \n ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, []) \\\n + cls.Base_attr_ro\n rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, []) \\\n + cls.Base_attr_rw\n methods = getattr(cls, '%s_methods' % api_cls, []) \\\n + cls.Base_methods\n funcs = getattr(cls, '%s_funcs' % api_cls, []) \\\n + cls.Base_funcs\n\n # wrap validators around readable class attributes\n for attr_name in ro_attrs + rw_attrs:\n doit('%s.get_%s' % (api_cls, attr_name), True,\n async_support=False)\n\n # wrap validators around writable class attrributes\n for attr_name in rw_attrs:\n doit('%s.set_%s' % (api_cls, attr_name), True,\n async_support=False)\n setter_event_wrapper(api_cls, attr_name)\n\n # wrap validators around methods\n for method_name, return_type in methods:\n doit('%s.%s' % (api_cls, 
method_name), True,\n async_support=True)\n\n # wrap validators around class functions\n for func_name, return_type in funcs:\n \n doit('%s.%s' % (api_cls, func_name), False,\n async_support=True,\n return_type=return_type)\n \n ctor_event_wrapper(api_cls)\n dtor_event_wrapper(api_cls)", "def bind_class(self, className, sequence=None, func=None, add=None):\n return super().bind_class(className, sequence, func, add)", "def class_message_handler(\n self,\n *custom_filters,\n commands=None,\n regexp=None,\n content_types=None,\n state=None,\n run_task=None,\n **kwargs\n ):\n\n def decorator(class_):\n handler = class_()\n\n self.register_message_handler(\n handler.callback,\n *custom_filters,\n commands=commands,\n regexp=regexp,\n content_types=content_types,\n state=state,\n run_task=run_task,\n **kwargs\n )\n return class_\n\n return decorator", "def decorate_class(cls, klass: type, decorate_subclasses=False, **setting_kwds) -> None:\n assert isinstance(klass, type) # in \"debug\" mode only\n if not isinstance(klass, type): # in either mode, have the same awareness at the same time\n return\n\n # Filter out builtins.\n if not get_file_of_object(klass):\n return\n\n def _deco_class(kls: type):\n t = cls(**setting_kwds)\n _ = t(kls)\n # assert _ == kls\n\n def _deco_class_rec(kls: type):\n _deco_class(kls)\n for subclass in kls.__subclasses__():\n _deco_class_rec(subclass)\n\n if decorate_subclasses:\n _deco_class_rec(klass)\n else:\n _deco_class(klass)\n # (_deco_class_rec if decorate_subclasses else _deco_class)(klass)", "def as_handler(cls, **initkwargs):\n @wraps(cls, updated=())\n def handler(asset, *args, **kwargs):\n return handler.handler_class(**initkwargs)(asset, *args, **kwargs)\n handler.handler_class = cls\n handler.supports_check_mode = cls.supports_check_mode\n return handler", "def register_instance(cls):\n\n @functools.wraps(cls)\n def wrapper_decorator(*args, **kwargs):\n\n instance = cls(*args, **kwargs)\n\n Register[cls.__name__] = instance\n\n return instance\n\n return wrapper_decorator", "def _register(cls):\r\n command_name = cls.__dict__.get('__command__', None)\r\n if command_name:\r\n Command._commands[command_name] = cls", "def auto_validator_hook():\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available auto validator hooks.\n\n :param type cls: auto validator hook class.\n\n :returns: auto validator hook class.\n :rtype: type\n \"\"\"\n\n instance = cls()\n auto_validator_services.register_hook(instance)\n\n return cls\n\n return decorator", "def extension(klass):\n registry.register(klass)\n return klass", "def extend_class(cls):\n return lambda f: (setattr(cls, f.__name__, f) or f)", "def format_class(cls, **kwargs): \n _doc_formatter = cls._format_obj(**kwargs) \n try:\n assert USE_WRAPT_OR_NOT and wrapt\n warnings.warn('wrapt based class decorator not implemented')\n except:\n pass\n finally:\n def _class_decorator(_cls):\n try: \n meta_cls = _cls.__metaclass__\n except:\n meta_cls = type\n class metaclass_decorator(meta_cls):\n def __new__(meta, name, bases, attrs):\n name = _cls.__name__\n attrs = _cls.__dict__\n bases = _cls.__bases__\n return meta_cls.__new__(meta, name, bases, attrs)\n metaclass_decorator.__name__ = '__metaclass__'\n class new_cls(_cls):\n __metadata__ = metaclass_decorator\n # We set the __doc__ directly when defining the new class, as to avoid the\n # 'non-writable' issue with __doc__\n # indeed attribute '__doc__' of 'type' objects is not writable:\n # \"AttributeError: attribute 
'__doc__' of 'type' objects is not writable\"\n # hence new-style classes (child of 'object' type) have non writable docstring\n __doc__ = _doc_formatter(_cls)\n # override new_cls.__init__ to prevent recursion, because new_cls.__init__ \n # is _cls.__init__ and it keeps calling itself.\n # name set after the class declaration\n try:\n new_cls.__name__ = _cls.__name__\n except: pass\n try:\n new_cls.__module__ = _cls.__module__\n except: pass\n return new_cls\n return _class_decorator", "def setup_class(cls):", "def setup_class(cls):", "def command(cls, name=None):\n postfix = name\n def decorator(method):\n if postfix is None:\n name = method.__name__\n else:\n name = postfix\n mod = method.__module__\n if mod.startswith('scripts.commands'):\n mod = mod[len('scripts.commands'):]\n mod = mod.lstrip('.')\n if mod == '__main__':\n full_name = name\n else:\n full_name = mod+'.'+name\n\n app = cls\n subcmds = cls.subcommands\n for sub in full_name.split('.')[:-1]:\n if sub not in subcmds:\n sub_app = type(sub+'App', (cli.Application,),{})\n sub_app = app.subcommand(sub)(sub_app)\n subcmds[sub] = (sub_app, {})\n else:\n pass\n\n app, subcmds = subcmds[sub]\n app.__doc__ = importlib.import_module(method.__module__).__doc__\n\n signature = inspect.signature(method)\n arguments = []\n for (arg_name, param) in signature.parameters.items():\n tp = param.annotation\n if isinstance(tp, Option) or isinstance(tp, Flag):\n if tp._names:\n names = tp._names\n else:\n names = ['-'+arg_name[0], '--'+arg_name]\n arguments.append([tp, arg_name, names, param.default, tp._doc])\n\n def main(self, *args):\n kw_args = {}\n for tp, name, _, _, _ in arguments:\n kw_args[name] = getattr(self, name)\n method(*args, **kw_args)\n\n newclass = type(name+'App', (cli.Application,), {\"main\": main})\n newclass.__doc__ = method.__doc__\n newclass = app.subcommand(name)(newclass)\n\n for tp, name, names, default, doc in arguments:\n if isinstance(tp, Option):\n setattr(newclass, name, cli.SwitchAttr(names, default=default, help=doc))\n elif isinstance(tp, Flag):\n setattr(newclass, name, cli.Flag(names, help=doc))\n return method\n\n return decorator", "def as_handler(cls, **initkwargs):\n @wraps(cls, updated=())\n def handler(asset):\n return handler.handler_class(**initkwargs)(asset)\n handler.handler_class = cls\n return handler", "def wrap_class(cls, class_name, class_method_inst):\n if not cls:\n return\n for (method, method_log_args) in class_method_inst.iteritems():\n fn = getattr(cls, method, None)\n if not fn:\n # Not all methods may be in all versions of pymongo...\n continue\n kvs = { 'Class': '%s.%s' % (cls.__module__, cls.__name__),\n 'Function': method,\n 'Action': '%s.%s' % (class_name, method),\n }\n # XXX Not Python2.4-friendly\n setattr(cls, method, oboe.log_method(PYMONGO_LAYER, entry_kvs=kvs, **method_log_args)(fn))", "def register_outliner_class(self, outliner_type, outliner_class):\n\n self._registered_outliner_classes[outliner_type] = outliner_class\n return True", "def serializer(*args, **kwargs):\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available serializers.\n\n :param type cls: serializer class.\n\n :returns: serializer class.\n :rtype: type\n \"\"\"\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls\n\n return decorator", "def _instrument_class(self, cls):\n newcls = type('InstrumentedClass', (cls, MapRedBase), {})\n return newcls", "def setup_class(klass):", "def 
setup_class(klass):", "def visit_ClassDef(self, node):\n self.classes[node.name] = self._generate_pytest_decorators(node.decorator_list)\n self.generic_visit(node)", "def register(self, cls, method=None):\n if isinstance(cls, (classmethod, staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)", "def register(self, cls, method=None):\n if isinstance(cls, (classmethod, staticmethod)):\n first_annotation = {}\n for k, v in cls.__func__.__annotations__.items():\n first_annotation[k] = v\n break\n cls.__annotations__ = first_annotation\n return self.dispatcher.register(cls, func=method)", "def register_class(self, instance, name=None):\n prefix_name = name or instance.__class__.__name__\n\n for e in dir(instance):\n if e[0][0] != \"_\":\n self.register_function(\n getattr(instance, e),\n name=\"%s.%s\" % (prefix_name, e)\n )", "def register(\n self, cls: typing.Any, method: typing.Optional[typing.Callable[..., _T]] = None\n ) -> typing.Any:\n return self.dispatcher.register(cls, func=method)", "def add_command(self, name, command_class, ns=None):\n ep = EntryPointWrapper(name, command_class)\n self.add_command_ep(ep, ns=ns)", "def process_class(self, parent, cls):\n if cls.typemap.flat_name in self.class_map:\n raise RuntimeError(\"process_class: class {} already exists in class_map\"\n .format(cls.typemap.flat_name))\n self.class_map[cls.typemap.flat_name] = cls\n for var in cls.variables:\n self.add_var_getter_setter(parent, cls, var)\n cls.functions = self.define_function_suffix(cls.functions)", "def log_exceptions(cls):\n\n class NewClass(cls):\n def handle(self, *args, **options):\n try:\n super().handle(args, options)\n except Exception:\n logger.exception(\"Management command '{}' failed. 
Traceback follows: \".format(sys.argv[1]))\n raise\n\n return NewClass", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def _swig_add_metaclass(metaclass):\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper", "def __call__(self, path):\n def wrapper(application):\n self.register(path, application)\n return application\n return wrapper", "def setup_class(cls):\n cls.handler = MyScaffoldHandler(\"handler\", SkillContext())", "def _swig_add_metaclass(metaclass):\r\n def wrapper(cls):\r\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\r\n return wrapper", "def add_class_hook(cls, event, function):\n if event not in cls.class_hooks:\n cls.class_hooks[event] = []\n cls.class_hooks[event].append(function)", "def use(_):\n\n def wrapper(cls):\n __app_controllers__.append(cls)\n return cls\n\n return wrapper", "def visit_class(self, flags, scope, token, parent):\r\n\r\n # define the class name in the current scope\r\n # see visit_block\r\n #scope.define(SC_FUNCTION, token.children[0])\r\n scope.defer(token)", "def register(dmm, typecls):\n def wraps(fn):\n dmm.register(typecls, fn)\n return fn\n\n return wraps", "def register(self, klass):\n if klass not in self.extensions:\n self.extensions.append(klass)", "def register(cls, name: str) -> Callable:\n\n def inner_wrapper(wrapped_class: GameObjectBase) -> Callable:\n if name in cls.registry:\n logging.warn(\"Class '{0}' already registered; overwriting old value\".format(name))\n cls.registry[name] = wrapped_class\n return wrapped_class\n\n return inner_wrapper", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def __init__(self, decorated):\n self.decorated = decorated", "def _create_class_proxy(cls, theclass):\n\n def 
make_method(name):\n def method(self, *args, **kw):\n if not object.__getattribute__(self, \"_track_on\")[0]:\n return getattr(\n object.__getattribute__(self, \"_obj\"), name)(*args,\n **kw)\n object.__getattribute__(self, \"_track_on\")[0] = False\n args_value = copy_and_placehold_data(args,\n object.__getattribute__(\n self, \"_track_on\"))\n args_value_copy = copy_call_data(args_value)\n kwargs_value = copy_and_placehold_data(kw,\n object.__getattribute__(\n self, \"_track_on\"))\n kwargs_value_copy = copy_call_data(kwargs_value)\n output = getattr(object.__getattribute__(self, \"_obj\"),\n name)(*args_value, **kwargs_value)\n output_value = copy_and_placehold_data(output,\n object.__getattribute__(\n self, \"_track_on\"))\n output_value_copy = copy_call_data(output_value)\n object.__getattribute__(self, \"_special_data\").append(\n SPECIAL_ATTR_DATA(name, args_value_copy, kwargs_value_copy,\n output_value_copy))\n object.__getattribute__(self, \"_track_on\")[0] = True\n return output_value\n\n return method\n\n namespace = {}\n for name in cls._special_names:\n if hasattr(theclass, name):\n namespace[name] = make_method(name)\n return type(\"%s(%s)\" % (cls.__name__, theclass.__name__), (cls, ),\n namespace)", "def add_plugin(self, cls):\r\n command = cls.__module__.split('.')[-1]\r\n if command not in self.plugins:\r\n self.plugins[command] = {}\r\n self.plugins[command][cls.action] = cls", "def withMaker(cls):\n nodeName = cls.__name__.decode(\"utf-8\")\n if cls.__init__ is object.__init__:\n names = ()\n else:\n names = inspect.getargspec(cls.__init__).args[1:]\n verb = nodeName\n if getattr(cls, \"fromMonte\", None) is not None:\n verb += \".fromMonte\"\n arglist = u\", \".join([u\"args[%s]\" % i for i in range(len(names))])\n runverb = 'RUN_%s' % len(names)\n src = \"\"\"\\\n @audited.DF\n class %sMaker(Object):\n def printOn(self, out):\n out.call(u\"print\", [StrObject(u\"<kernel make%s>\")])\n def recv(self, atom, args):\n if atom is %s:\n return %s(%s)\n raise Refused(self, atom, args)\n \"\"\" % (nodeName, nodeName, runverb, verb, arglist)\n d = globals()\n exec textwrap.dedent(src) in d\n cls.nodeMaker = d[nodeName + \"Maker\"]()\n return cls", "def decorator(cls):\n\n instance = cls()\n auto_validator_services.register_hook(instance)\n\n return cls", "def add_metaclass(metaclass: abc.ABCMeta) -> Callable[[Any], object]:\n\n def wrapper(cls: Any) -> object:\n orig_vars = cls.__dict__.copy()\n slots = orig_vars.get(\"__slots__\")\n if slots is not None:\n if isinstance(slots, str):\n slots = [slots]\n for slots_var in slots:\n orig_vars.pop(slots_var)\n orig_vars.pop(\"__dict__\", None)\n orig_vars.pop(\"__weakref__\", None)\n if hasattr(cls, \"__qualname__\"):\n orig_vars[\"__qualname__\"] = cls.__qualname__\n return metaclass(cls.__name__, cls.__bases__, orig_vars)\n\n return wrapper", "def setup_class(cls):\n pass", "def setup_class(cls):\n pass", "def register(cls):\n if not issubclass(cls, Fuzzer):\n raise TypeError(\"Expecting a Fuzzer, not '%s'\" % type(cls))\n _registered.append(cls)", "def register_handler(cls, handler):\n with cls._lock:\n cls._handlers[cls] = handler", "def register(self, cls):\r\n\r\n # Do all checks and complain before changing any state.\r\n if len(cls.tags) == 0:\r\n raise ValueError(\"No tags specified for class {0}\".format(cls.__name__))\r\n\r\n for t in cls.tags:\r\n if t in self._mapping:\r\n other_cls = self._mapping[t]\r\n if cls == other_cls:\r\n # registering the same class multiple times seems silly, but ok\r\n continue\r\n raise 
ValueError(\"Tag {0} already registered by class {1}.\"\r\n \" Can't register for class {2}\"\r\n .format(t, other_cls.__name__, cls.__name__))\r\n\r\n # Ok, should be good to change state now.\r\n for t in cls.tags:\r\n self._mapping[t] = cls\r\n\r\n # Returning the cls means we can use this as a decorator.\r\n return cls", "def register_command(*parse_args, **options):\n def wrapper(function):\n function._is_command = True\n return function\n return wrapper", "def register_class(cls):\n if cls is RegisteredType:\n raise \"Please do _not_ register RegisteredType!\"\n \n cid = RegisteredType._reg[autoid]\n RegisteredType._reg['classes'][cls] = cid\n RegisteredType._reg['classids'][cid] = cls\n RegisteredType._reg['autoid'] += 1", "def method(cls):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n return func(*args, **kwargs)\n setattr(cls, func.__name__, wrapper)\n return func\n return decorator", "def format(cls, **kwargs):\n def _decorator(obj):\n if inspect.isclass(obj):\n _class_decorator = cls.format_class(**kwargs) \n return _class_decorator(obj)\n else:\n _func_decorator = cls.format_method(**kwargs) \n return _func_decorator(obj)\n return _decorator", "def register(name):\n def func(cls):\n \"\"\"\n See register\n \"\"\"\n REGISTRY[name] = cls()\n return cls\n return func", "def _create_class_proxy(cls, theclass):\n \n def make_method(name):\n def method(self, *args, **kw):\n return getattr(object.__getattribute__(self, \"_obj\"), name)(*args, **kw)\n return method\n \n namespace = {}\n for name in cls._special_names:\n if name in cls._implemented:\n namespace[name] = getattr(cls, name)\n elif hasattr(theclass, name):\n namespace[name] = make_method(name)\n return type(\"%s(%s)\" % (cls.__name__, theclass.__name__), (cls,), namespace)", "def _add_method(cls: type) -> Callable:\n\n def decorator(func):\n func.enable = lambda: _method_enable(\n cls, [_plugin_funcname(func)], func\n )\n func.disable = lambda: _method_disable(\n cls, [_plugin_funcname(func)], func\n )\n return func\n\n return decorator", "def register_dumper(\n self, cls: Union[type, str, None], dumper: Type[Dumper]\n ) -> None:\n if not (cls is None or isinstance(cls, (str, type))):\n raise TypeError(\n f\"dumpers should be registered on classes, got {cls} instead\"\n )\n\n if _psycopg:\n dumper = self._get_optimised(dumper)\n\n # Register the dumper both as its format and as auto\n # so that the last dumper registered is used in auto (%s) format\n if cls:\n for fmt in (PyFormat.from_pq(dumper.format), PyFormat.AUTO):\n if not self._own_dumpers[fmt]:\n self._dumpers[fmt] = self._dumpers[fmt].copy()\n self._own_dumpers[fmt] = True\n\n self._dumpers[fmt][cls] = dumper\n\n # Register the dumper by oid, if the oid of the dumper is fixed\n if dumper.oid:\n if not self._own_dumpers_by_oid[dumper.format]:\n self._dumpers_by_oid[dumper.format] = self._dumpers_by_oid[\n dumper.format\n ].copy()\n self._own_dumpers_by_oid[dumper.format] = True\n\n self._dumpers_by_oid[dumper.format][dumper.oid] = dumper", "def listener(cls):\n func = cls.__init__\n\n # Wraps the class constructor to automate the subscription of methods to\n # event handlers\n @wraps(cls.__init__)\n def new_init(self, *args, **kwargs):\n _subscribe_marked_events(self)\n func(self, *args, **kwargs)\n\n # Patching the constructor\n cls.__init__ = new_init\n return cls", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", 
"def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):" ]
[ "0.7828587", "0.65119535", "0.64277583", "0.6273776", "0.62530273", "0.61781615", "0.61653596", "0.60024714", "0.60024714", "0.5967244", "0.57739705", "0.5762401", "0.5757489", "0.5746612", "0.56892204", "0.5668369", "0.56358856", "0.56313264", "0.5613844", "0.5579808", "0.55725545", "0.55186033", "0.5498826", "0.5498826", "0.5483674", "0.5476988", "0.54615474", "0.5444454", "0.5423939", "0.5397158", "0.53929853", "0.53929853", "0.5389887", "0.5350629", "0.5350629", "0.5343725", "0.53375524", "0.5337088", "0.5311771", "0.5299431", "0.529696", "0.529696", "0.529696", "0.529696", "0.529696", "0.529696", "0.529696", "0.529696", "0.529696", "0.529696", "0.529696", "0.529696", "0.529696", "0.5292612", "0.52919006", "0.5288514", "0.528624", "0.52781856", "0.52755004", "0.5274692", "0.52670455", "0.52655435", "0.5262182", "0.5262182", "0.5262182", "0.52406317", "0.52395314", "0.52367246", "0.5215784", "0.5212408", "0.52041775", "0.52041775", "0.5203706", "0.52025217", "0.52001023", "0.5198448", "0.518009", "0.51776344", "0.5175518", "0.51670295", "0.5160435", "0.5159978", "0.51472884", "0.5142496", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833", "0.5139833" ]
0.7280425
1
Generate a dict of security data for "initial" data.
def generate_object_data(self):
    object_dict = {
        'content_type' : str(self.target_object._meta),
        'object_id' : str(self.target_object._get_pk_val()),
    }
    return object_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_security_data(self):\n timestamp = int(time.time())\n security_dict = {\n 'content_type': str(self.target_object._meta),\n 'object_pk': str(self.target_object._get_pk_val()),\n 'timestamp': str(timestamp),\n 'security_hash': self.initial_security_hash(timestamp),\n }\n return security_dict", "def _get_data(self):\n data = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # skip for factories for now\n continue\n value = getattr(self, name)\n raw_value = field.to_raw(value)\n if isinstance(field, fields.Secret):\n data[f\"__{name}\"] = raw_value\n else:\n data[name] = raw_value\n\n return data", "def _get_user_data(self):\n return {\"key\": self._key}", "def get_initial_author_dict():\n adict = {}\n try:\n ah = run_sql(\"select aterm,hitlist from rnkAUTHORDATA\")\n for (a, h) in ah:\n adict[a] = deserialize_via_marshal(h)\n return adict\n except:\n register_exception(prefix=\"could not read rnkAUTHORDATA\", alert_admin=True)\n return {}", "def safe_data(self):\r\n hide = ['_password', 'password', 'is_admin', 'api_key']\r\n return dict(\r\n [(k, v) for k, v in dict(self).iteritems() if k not in hide]\r\n )", "def generate_random_data() -> dict:\n data = {\n \"_pl\": {\n \"userId\": uuid.uuid4().__str__(),\n \"sensorValue\": random.random(),\n \"sensorId\": \"\".join(random.choices(string.ascii_lowercase + string.digits, k=5))\n + \"-\"\n + \"\".join(random.choices(string.ascii_lowercase + string.digits, k=10))\n + \"-\"\n + \"\".join(random.choices(string.ascii_lowercase + string.digits, k=10))\n }\n }\n return data", "def get_data_to_create_object(self):\n return {}", "def get_initial(self):\n modelo = self.get_object()\n perm_list = [perm.codename for perm in list(modelo.permissions.all())]\n initial = {'perms_proyecto': perm_list, 'perms_sprint': perm_list, 'perms_userstory': perm_list,\n 'perms_flujo': perm_list}\n return initial", "def prepare_student_data(self) -> dict:\n self._filename_pre_data()\n empty_student = {}\n empty_student[\"scoreTimestamp\"] = \"N/A\"\n for i in self.draft_out:\n empty_student[i] = \"N/A\"\n for i in self.pre_data:\n empty_student[i] = self.pre_data[i]\n self.pre_data = empty_student", "def get_data(self):\n self.data = dict()\n # list to save all the attributes we are going to create\n self.attr = []\n # list to save all the groups available in the incomming input\n self.groups.extend(self.values.keys())\n # Grouping\n self.parse_data()", "def get_data_extra(self, initial):\n extra = {\n 'distance':'10',\n 'latitude':'0',\n 'longitude':'1'\n }\n return dict(initial.items() + extra.items())", "def get_initial(self):\n initial = super(InterventionCreate, self).get_initial()\n infrastructure = self.on_infrastucture()\n signage = self.on_signage()\n if infrastructure:\n # Create intervention on an infrastructure\n initial['infrastructure'] = infrastructure\n elif signage:\n # Create intervention on a signage\n initial['signage'] = signage\n return initial", "def __initializeData():\n\tdata = OrderedDict()\n\tdata['Saved_LIVE'] = False\n\tdata['Saved_POST'] = False\n\tdata['Time_Written_POST'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\tdata['Time_Written_LIVE'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\treturn data", "def generate(self) -> Dict[str, Any]:\n raise NotImplementedError", "def _generate_voter_in_dict(id: bytes, timestamp: int, prep: 'Prep') -> dict:\n voter_in_dict = {\n \"id\": '0x' + bytes.hex(id),\n \"timestamp\": timestamp,\n \"address\": 
str(prep.address),\n \"name\": prep.name,\n \"amount\": prep.delegated\n }\n return voter_in_dict", "def build_private_data(self, job, private_data_dir):\n private_data = {'credentials': {}}\n for credential in job.credentials.prefetch_related('input_sources__source_credential').all():\n # If we were sent SSH credentials, decrypt them and send them\n # back (they will be written to a temporary file).\n if credential.has_input('ssh_key_data'):\n private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')\n if credential.has_input('ssh_public_key_data'):\n private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')\n\n return private_data", "def Secure(self,passphrase=None,public_attributes=[]):\n\n\t\tif passphrase == None:\n\t\t\treturn self.Dictify()\n\t\telse:\n\t\t\tself.data = Encrypting.Symmetric.Encrypt(json.dumps(self.Dictify()).encode('utf-8'),passphrase).decode('utf-8')\n\t\t\t\n\t\t#secure data and dictify\n\t\tmy_secure_dict = self.Dictify()\n\n\t\t#new obfuscated obj\n\t\tnew_me = {'data':my_secure_dict['data']}\n\n\t\tfor pub_att in public_attributes:\n\t\t\tnew_me[pub_att] = my_secure_dict[pub_att]\n\n\t\treturn new_me", "def _build_identity_dict(mail, display_name, given_name, surname):\r\n meta_dict = {'Shib-Identity-Provider': IDP,\r\n 'REMOTE_USER': REMOTE_USER}\r\n if display_name is not None:\r\n meta_dict['displayName'] = display_name\r\n if mail is not None:\r\n meta_dict['mail'] = mail\r\n if given_name is not None:\r\n meta_dict['givenName'] = given_name\r\n if surname is not None:\r\n meta_dict['sn'] = surname\r\n return meta_dict", "def build_private_data(self, project_update, private_data_dir):\n private_data = {'credentials': {}}\n if project_update.credential:\n credential = project_update.credential\n if credential.has_input('ssh_key_data'):\n private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')\n return private_data", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def valid_data():\n return dict(\n id=str(uuid4()),\n created_at=1559933807392,\n name='my project',\n description='a good project',\n status='in-progress'\n )", "def _yamlData(self):\n data = dict([(key, value)\n for key, value in self.__dict__.iteritems()\n if ((key in self._yamlAttributeKeys)\n and (key not in self._yamlSpeciallyHandledAttributes))])\n data.update(self._preservedExtraAttributes)\n return data", "def get_dictionary(self):\n data = {\n \"user_first_name\": self.user.first_name,\n \"user_last_name\": self.user.last_name,\n }\n dct = provider.Provider.get_dictionary(self)\n dct.update(data)\n return dct", "def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen': data.hlen,\n 'plen': data.plen,\n 'oper': data.oper,\n 'sha': data.sha,\n 'spa': data.spa,\n 'tha': data.tha,\n 'tpa': data.tpa,\n 'payload': cls._make_payload(data),\n }", "def initial_security_hash(self, timestamp):\n\n initial_security_dict = {\n 'content_type': str(self.target_object._meta),\n 'object_pk': 
str(self.target_object._get_pk_val()),\n 'timestamp': str(timestamp),\n }\n return self.generate_security_hash(**initial_security_dict)", "def get_initial_inputs(self) -> Dict[str, ValueType]:\n if self.const_inputs:\n return self.const_inputs.copy() # Clone predefined\n return {} # Nothing set yet", "def get_data(self):\n data = {}\n _priv = self.get('_private', [])\n\n def check_data(v):\n if isinstance(v, Yaco):\n v = v.get_data()\n elif isinstance(v, list):\n v = [check_data(x) for x in v]\n return v\n\n for k in list(self.keys()):\n if k in _priv:\n continue\n if isinstance(k, (str)) and k and k[0] == '_':\n continue\n # print self.keys()\n # print k, 'x' * 30\n data[k] = check_data(self[k])\n return data", "def transform_credentials(self, data: Dict, **kwargs) -> Dict:\r\n name = data.pop(\"name\")\r\n return_data = {name: data}\r\n return return_data", "def get_initial_data(self):\r\n data = {}\r\n for name, field in self.fields.items():\r\n if hasattr(field, 'widget') and 'ng-model' in field.widget.attrs:\r\n data[name] = self.initial and self.initial.get(name) or field.initial\r\n return data", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", "def pre_security_group_create(self, resource_dict):\n pass", "def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord", "def dictOfRandomVariables(self):\n return dict()", "def __dict__(self):\r\n result = {}\r\n result['block_type'] = 'register'\r\n result['prev_hash'] = base64.b64encode(self.prev_hash).decode()\r\n result['timestamp'] = self.time\r\n result['user_id'] = self.user_id\r\n result['public_key'] = base64.b64encode(self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)).decode()\r\n return result", "def get_initial(self):\n initial = {}\n\n if self.kwargs.get('mode', None):\n filename = \"{}.txt\".format(self.kwargs['mode'])\n filepath = os.path.join(settings.BASE_DIR, 'demo_datas', filename)\n if os.path.exists(filepath):\n with io.open(filepath, 'r', encoding='utf-8') as fp:\n initial['foo'] = fp.read()\n\n return initial", "def _collect_data(self):\n data = {\n \"K\": self.K,\n \"root\": self.root\n }\n return data", "def populate_initial_valid_metadata(self):\n pass", "def default_start(self, data):\n return {}", "def default_start(self, data):\n return {}", "def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata", "def credentials(self) -> Mapping:", "def get_initial_data(self, removed=('billing_country_code', )):\n initial = getattr(self, 'initial_data', None) or {}\n for ea in removed:\n initial.pop(ea, None)\n if not initial:\n return 
initial\n test_data = MultiValueDict()\n test_data.update(initial)\n self.test_data = test_data\n return test_data", "def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]", "def _pre_construct(self, data):\n logging.info(\"pre constructing (enter)\")\n self.ids = collections.defaultdict(set)\n self.collecting = True\n pre_construct_data = self.construct(data)\n self.collecting = False\n logging.info(\"pre constructing (exit)\")\n return pre_construct_data", "def example_data():\n\n User.create_user(\"Kate\", \"longpass\", None)\n User.create_user(\"Long\", \"regularpass\", None)\n User.create_user(\"Critter\", \"shortpass\", None)", "def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)", "def _get_post_data(self, random_str):\n return {\n 'root_domain': '{0}.oregonstate.com'.format(random_str),\n 'soa_primary': 'ns1.oregonstate.com',\n 'soa_contact': 'noc.oregonstate.com',\n 'nameserver_1': 'ns1.oregonstate.com',\n 'ttl_1': '1234'\n }", "def prepare_data(self):", "def random_init(constr=None):\n if constr is not None:\n pass\n else:\n constr = {}\n if \"PERIODS\" in constr.keys():\n periods = constr[\"PERIODS\"]\n else:\n periods = np.random.randint(2, 20)\n if \"AGENTS\" in constr.keys():\n agents = constr[\"AGENTS\"]\n else:\n agents = np.random.randint(100, 5000)\n if \"SEED\" in constr.keys():\n seed = constr[\"SEED\"]\n else:\n seed = np.random.randint(1000, 10000)\n if \"SHARE\" in constr.keys():\n share = constr[\"SHARE\"]\n else:\n share = np.random.uniform(0.1, 0.8)\n if \"FILE\" in constr.keys():\n file = constr[\"FILE\"]\n else:\n file = str(uuid.uuid4()).upper().replace(\"-\", \"\")[0:8]\n\n init_dict = {\"SIMULATION\": {}, \"PARAMS\": {}, \"DIST\": {}}\n\n init_dict[\"SIMULATION\"][\"periods\"] = periods\n init_dict[\"SIMULATION\"][\"agents\"] = agents\n init_dict[\"SIMULATION\"][\"share\"] = share\n init_dict[\"SIMULATION\"][\"seed\"] = seed\n init_dict[\"SIMULATION\"][\"file\"] = file\n\n init_dict[\"PARAMS\"][\"alpha\"] = np.random.normal(1, 0.25)\n init_dict[\"PARAMS\"][\"theta\"] = np.random.normal(0.1, 0.025)\n\n init_dict[\"DIST\"][\"beta\"] = np.random.normal(0.75, 0.1)\n init_dict[\"DIST\"][\"mu\"] = np.random.normal(0.5, 0.1)\n\n print_dict(init_dict)\n\n return init_dict", "def get_static_user_data():\r\n import os\r\n\r\n import yaml\r\n from legion_test.profiler_loader import CREDENTIAL_SECRETS_ENVIRONMENT_KEY\r\n secrets = os.getenv(CREDENTIAL_SECRETS_ENVIRONMENT_KEY)\r\n if not secrets:\r\n raise Exception(\r\n 'Cannot get secrets - {} env variable is not set'.format(CREDENTIAL_SECRETS_ENVIRONMENT_KEY))\r\n\r\n if not os.path.exists(secrets):\r\n raise Exception('Cannot get 
secrets - file not found {}'.format(secrets))\r\n\r\n with open(secrets, 'r') as stream:\r\n data = yaml.load(stream)\r\n\r\n static_user = data['dex']['config']['staticPasswords'][0]\r\n return {\"login\": static_user['email'], \"password\": static_user['password']}", "def mock_valid_data_without_security_code():\n return {\n \"CreditCardNumber\": \"123454567890123456\",\n \"CardHolder\": \"Test Name\",\n \"ExpirationDate\":\n (dt.datetime.now() + dt.timedelta(hours=1)).isoformat(),\n \"Amount\": 100\n }", "def build_private_data(self, ad_hoc_command, private_data_dir):\n # If we were sent SSH credentials, decrypt them and send them\n # back (they will be written to a temporary file).\n creds = ad_hoc_command.credential\n private_data = {'credentials': {}}\n if creds and creds.has_input('ssh_key_data'):\n private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')\n if creds and creds.has_input('ssh_public_key_data'):\n private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')\n return private_data", "def _get_app_data(self):\n lti = LTI(self.request, self.kwargs[\"uuid\"])\n lti.verify()\n\n app_data = None\n if lti.is_student:\n cache_key = \"app_data|{model:s}|{domain:s}|{context:s}|{resource!s}\".format(\n model=self.model.__name__,\n domain=lti.get_consumer_site().domain,\n context=lti.context_id,\n resource=lti.resource_id,\n )\n\n app_data = cache.get(cache_key)\n permissions = {\"can_access_dashboard\": False, \"can_update\": False}\n\n if not app_data:\n resource = get_or_create_resource(self.model, lti)\n permissions = {\n \"can_access_dashboard\": lti.is_instructor or lti.is_admin,\n \"can_update\": (lti.is_instructor or lti.is_admin)\n and resource.playlist.lti_id == lti.context_id,\n }\n app_data = {\n \"modelName\": self.model.RESOURCE_NAME,\n \"resource\": self.serializer_class(resource).data if resource else None,\n \"state\": \"success\",\n \"sentry_dsn\": settings.SENTRY_DSN,\n \"environment\": settings.ENVIRONMENT,\n \"release\": settings.RELEASE,\n \"static\": {\"svg\": {\"plyr\": static(\"svg/plyr.svg\")}},\n }\n if lti.is_student:\n cache.set(cache_key, app_data, settings.APP_DATA_CACHE_DURATION)\n\n if app_data[\"resource\"] is not None:\n try:\n locale = react_locale(lti.launch_presentation_locale)\n except ImproperlyConfigured:\n locale = \"en_US\"\n\n # Create a short-lived JWT token for the video\n jwt_token = AccessToken()\n jwt_token.payload.update(\n {\n \"session_id\": str(uuid.uuid4()),\n \"context_id\": lti.context_id,\n \"resource_id\": str(lti.resource_id),\n \"roles\": lti.roles,\n \"course\": lti.get_course_info(),\n \"locale\": locale,\n \"permissions\": permissions,\n \"maintenance\": settings.MAINTENANCE_MODE,\n }\n )\n try:\n jwt_token.payload[\"user_id\"] = lti.user_id\n except AttributeError:\n pass\n\n app_data[\"jwt\"] = str(jwt_token)\n\n return app_data", "def uncleanable():\n data = attrdict.AttrDict()\n data.backup_ids = set()\n data.image_ids = set()\n data.keypair_ids = set()\n data.server_ids = set()\n data.nodes_ids = set()\n data.chassis_ids = set()\n data.snapshot_ids = set()\n data.transfer_ids = set()\n data.volume_ids = set()\n return data", "def generate_auth_dict_ws(self,\n nonce: int):\n return {\n \"algo\": \"HS256\",\n \"pKey\": str(self.api_key),\n \"nonce\": str(nonce),\n \"signature\": hmac.new(self.secret_key.encode('utf-8'),\n str(nonce).encode('utf-8'),\n hashlib.sha256).hexdigest()\n }", "def build_init_payload(self) -> dict:\n expectation_suites: 
list[ExpectationSuite] = [\n self._data_context.get_expectation_suite(expectation_suite_name)\n for expectation_suite_name in self._data_context.list_expectation_suite_names()\n ]\n\n # <WILL> 20220701 - ValidationOperators have been deprecated, so some init_payloads will not have them included\n validation_operators = None\n if hasattr(self._data_context, \"validation_operators\"):\n validation_operators = self._data_context.validation_operators\n\n init_payload = {\n \"platform.system\": platform.system(),\n \"platform.release\": platform.release(),\n \"version_info\": str(sys.version_info),\n \"datasources\": self._data_context.project_config_with_variables_substituted.datasources,\n \"stores\": self._data_context.stores,\n \"validation_operators\": validation_operators,\n \"data_docs_sites\": self._data_context.project_config_with_variables_substituted.data_docs_sites,\n \"expectation_suites\": expectation_suites,\n \"dependencies\": self._get_serialized_dependencies(),\n }\n\n return init_payload", "def get_post_data(self, random_str):\n return {\n 'root_domain': '{0}.{0}.oregonstate.edu'.format(\n random_label() + random_str),\n 'soa_primary': 'ns1.oregonstate.edu',\n 'soa_contact': 'noc.oregonstate.edu',\n 'nameserver_1': 'ns1.oregonstate.edu',\n 'nameserver_2': 'ns2.oregonstate.edu',\n 'nameserver_3': 'ns3.oregonstate.edu',\n 'ttl_1': random_byte(),\n 'ttl_2': random_byte(),\n 'ttl_3': random_byte(),\n }", "def create_system_data():\n system_data = dict()\n system_data['system'] = dict()\n system_data['system']['primary'] = dict()\n system_data['system']['primary']['controllers'] = dict()\n system_data['system']['primary']['controllers']['re0'] = dict()\n system_data['system']['primary']['controllers']['re0']['hostname'] = 'abc'\n system_data['system']['primary']['controllers']['re0']['mgt-ip'] = '1.1.1.1'\n system_data['system']['primary']['controllers']['re0']['osname'] = 'Paragon'\n system_data['system']['primary']['name'] = 'abc'\n system_data['system']['primary']['model'] = 'Paragon'\n system_data['system']['primary']['make'] = 'Calnex'\n system_data['system']['primary']['server-ip'] = '1.1.1.2'\n system_data['system']['primary']['osname'] = 'Paragon'\n return system_data", "def initMetadata(self):\n\n if not 'flags' in self.metadata:\n\n self.metadata['flags'] = {}\n\n if not 'uidvalidity' in self.metadata:\n\n\n self.metadata['uidvalidity'] = random.randint(1000000, 9999999)\n\n if not 'uids' in self.metadata:\n\n self.metadata['uids'] = {}\n\n if not 'uidnext' in self.metadata:\n\n self.metadata['uidnext'] = 1", "def get_new_user_data(cleartext_password):\n new_user_data_dict = {}\n\n new_user_data_dict['unique_user_id'] = uuid.uuid4()\n\n cleartext_password_unicode = cleartext_password.decode('utf-8')\n session_hashed_password = get_session_hash_password(cleartext_password_unicode)\n new_user_data_dict['user_salt'] = base64.b64encode(get_new_8_salt())\n new_user_data_dict['keyset'] = get_new_rsa_keyset(session_hashed_password)\n\n return new_user_data_dict", "def _make_context():\n return {'User': User, 'CreditCard': CreditCard, 'Transaction': Transaction, 'db': db, 'jsonify':jsonify}", "def get_initial(self):\n initial = {'proyecto': self.get_proyecto()}\n return initial", "def build_private_data(self, instance, private_data_dir):", "def student_view_data(self):\n def get_student_profile_data():\n # pylint: disable=no-member\n \"\"\"\n Returns profile data for all students on the course.\n \"\"\"\n try:\n regexp_string = 
self.regexp_from_users_included_email(self.users_included_email)\n re.compile(regexp_string)\n users = self.students_for_course(regexp_string)\n except:\n log.info(\"regexp is invalid: '%s'\", regexp_string)\n users = []\n\n for user in users:\n student_id = anonymous_id_for_user(user, self.course_id)\n profile = user.profile\n\n vip = self.get_vip(user)\n image_url = None\n if vip:\n image_url = \"https://my.imd.org/api/profile/{}/profile-picture-header\".format(vip)\n else:\n if self.is_course_staff:\n image_url = self.runtime.local_resource_url(self, 'public/images/profile-picture-header-no-vip.gif')\n else:\n image_url = self.runtime.local_resource_url(self, 'public/images/profile-picture-header.gif')\n\n cohort_name = None\n if (self.is_course_cohorted(self.course_id)):\n cohort_name = self.get_cohort(user, self.course_id).name\n\n yield {\n 'student_id': student_id,\n 'username': user.username,\n 'fullname': profile.name,\n 'vip': vip,\n 'image_url': image_url,\n 'email': user.email,\n 'cohort_name': cohort_name,\n }\n\n return {\n 'student_profile_list': list(get_student_profile_data()),\n 'display_name': self.display_name,\n 'username': self.logged_in_username,\n 'course_is_cohorted': self.enable_cohorts and self.is_course_cohorted(self.course_id),\n 'profile_display': {\n 'profile_display_job_title': self.profile_display_job_title,\n 'profile_display_organisation': self.profile_display_organisation,\n 'profile_display_work_country': self.profile_display_work_country,\n 'profile_display_email_button': self.profile_display_email_button,\n 'profile_display_bio': self.profile_display_bio,\n },\n }", "def user_data(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n '_id' : self._id\n }", "def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()", "def gen_keys():", "def _dataset_sentinel_helper(self):\n params = self.cross_experiment_key.parameters\n return dict(\n dataset_hash=params[\"train_dataset\"],\n cv_type=params[\"cross_experiment_params\"].get(\n \"cv_type\", params[\"cross_experiment_params\"].get(\"cross_validation_type\", None)\n ),\n global_random_seed=params[\"cross_experiment_params\"][\"global_random_seed\"],\n random_seeds=params[\"cross_experiment_params\"][\"random_seeds\"],\n )", "def __returnInitialParametersLocal__(self):\n return {}", "def _oauth_payload_generate(self):\n\t\tresult = {\n\t\t\t\"oauth_consumer_key\" : self.key,\n\t\t\t\"oauth_nonce\" : self._oauth_nonce_generate(),\n\t\t\t\"oauth_signature_method\" : \"HMAC-SHA1\",\n\t\t\t\"oauth_timestamp\" : str( int( time.time()) ),\n\t\t\t\"oauth_version\" : \"1.0\"\n\t\t}\n\n\t\t# * if token is unavaliable, this func must be called from request_token\n\t\t# provide callback addr instead.\n\t\t# * access token should have a higher priority ...\n\t\tif self.has_user():\n\t\t\tresult[\"oauth_token\"] = self.a_token\n\t\telse:\n\t\t\tif len( self.token ) > 0:\n\t\t\t\tresult[\"oauth_token\"] = self.token\n\t\t\telse:\n\t\t\t\tresult[\"oauth_callback\"] = self.callback\n\n\t\treturn result", "def data(self) -> dict[str, Any]:\n raise NotImplementedError()", "def __makeLoginDict(self, loginName, password, data=None):\n dict = {\n 'accountName': loginName,\n 'password': password\n }\n if data:\n dict.update(data)\n return dict", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['workingDir'] = self.workingDir\n paramDict['dataFilename'] = self.dataFilename\n 
paramDict['functionID'] = self.functionID\n paramDict['functionType'] = self.functionType\n paramDict['variableID'] = self.variableID\n paramDict['k'] = self.k\n paramDict['s'] = self.s\n return paramDict", "def createDict( self ):\n self.d = {}\n self.d['comp1'] = compensation_channel('comp1', 0, (-479.0, -10.0))\n self.d['comp2'] = compensation_channel('comp2', 1, (-479.0, -10.0))\n self.addCalibration()", "def _build_data(self):\n licence_types = [('all', 'All')] + [(lt.pk, lt.display_name) for lt in LicenceType.objects.all()]\n data = {\n 'applications': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n 'status': {\n 'values': [],\n }\n },\n 'ajax': {\n 'url': ''\n }\n },\n 'licences': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n },\n 'ajax': {\n 'url': ''\n }\n },\n 'returns': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n },\n 'ajax': {\n 'url': ''\n }\n }\n }\n return data", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def set_params_data(self):\n for key in self.params:\n self.params_data[key] = {}\n self.params_data[key]['x'] = [i[0] for i in self.rand_points]\n self.params_data[key]['y'] = [i[1] for i in self.rand_points]\n self.params_data[key]['z'] = self.generate_random_data(\n min_=self.params[key]['min'],\n max_=self.params[key]['max'],\n len_=len(self.rand_points)\n )\n return self.params_data", "def _prepare_data(\n self,\n request_data: Optional[Dict[str, Any]] = None,\n ) -> Dict[str, Any]:\n if request_data is None:\n request_data = {}\n request_data['page.rows'] = self._rows_in_page\n if self._current_row:\n request_data['page.number'] = \\\n self._current_row // self._rows_in_page + 1\n else:\n # Page number starts from 0\n page_number = self._min_row // self._rows_in_page\n # But for request page number starts from 1\n request_data['page.number'] = page_number + 1\n self._current_row = self._rows_in_page * page_number\n return request_data", "def input_data_initialised(n_ops, power_system):\r\n time_zeros = np.zeros((n_ops, 1))\r\n power_zeros = np.zeros((n_ops, power_system['n_buses']))\r\n states_initial = np.zeros((n_ops, power_system['n_states']))\r\n\r\n states_results_zeros = np.zeros((n_ops, power_system['n_states']))\r\n states_t_results_zeros = np.zeros((n_ops, power_system['n_states']))\r\n data_type_zeros = np.zeros((n_ops, power_system['n_states']))\r\n\r\n data_initialised = {'time': time_zeros,\r\n 'power': power_zeros,\r\n 'states_initial': states_initial,\r\n 'states_results': states_results_zeros,\r\n 'states_t_results': states_t_results_zeros,\r\n 'data_type': data_type_zeros}\r\n\r\n return data_initialised", "def getJSONData(self):\n return {\"pubkey\": self.pubkey, \"privkey\": self.privkey}", "def data(self) -> dict:\n raise NotImplementedError()", "def init_data_for_users(db_data):\n users = db_data.get('user')\n if users is not None:\n rows = users.get('data')\n for row in rows:\n user = 
User(name=row[0], password=generate_password_hash(row[1]))\n db_add_and_commit(db, user)", "def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def _empty_data(self):\n return {\n \"info\": {\n \"root_cache_dir\": self._get_default_cache_dir(),\n \"root_downloads_dir\": self._get_default_downloads_dir(),\n },\n \"dataset\": {},\n \"category\": {}\n }", "async def _process_create_data(self, data: dict) -> dict:\n return self.SCHEMA(data)", "def _build_person_data(request):\n if hasattr(request, 'rollbar_person'):\n rollbar_person_prop = request.rollbar_person\n person = rollbar_person_prop() if callable(rollbar_person_prop) else rollbar_person_prop\n if person and isinstance(person, dict):\n return person\n else:\n return None\n\n if StarletteRequest:\n from rollbar.contrib.starlette.requests import hasuser\n else:\n def hasuser(request): return True\n\n if hasuser(request) and hasattr(request, 'user'):\n user_prop = request.user\n user = user_prop() if callable(user_prop) else user_prop\n if not user:\n return None\n elif isinstance(user, dict):\n return user\n else:\n retval = {}\n if getattr(user, 'id', None):\n retval['id'] = str(user.id)\n elif getattr(user, 'user_id', None):\n retval['id'] = str(user.user_id)\n\n # id is required, so only include username/email if we have an id\n if retval.get('id'):\n username = getattr(user, 'username', None)\n email = getattr(user, 'email', None)\n retval.update({\n 'username': username,\n 'email': email\n })\n return retval\n\n if hasattr(request, 'user_id'):\n user_id_prop = request.user_id\n user_id = user_id_prop() if callable(user_id_prop) else user_id_prop\n if not user_id:\n return None\n return {'id': str(user_id)}", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def create_initial_templates_document() -> Dict[str, Any]:\n return {\n 'schema-version': 'v1', 'document-version': '',\n 'gateway-templates': [], 'service-templates': [],\n }", "def from_data(self, data):\r\n for field in (field for field in self.SAVE_FIELDS if field not in data):\r\n cid = debug_id(guild=self.game.guild, user=self.user, charname=data.get('name',None))\r\n wg.log.warning(f\"Character {cid} missing field {field}\")\r\n\r\n for field in data:\r\n # Expects secured data\r\n setattr(self, field, data[field])", "def _create_resource_consumption_dict():\n\n returned_resource_dict = {}\n\n # things that are quantities should start at 0.0\n for resource in resource_constants.quantity_resources:\n returned_resource_dict[resource] = 0.0\n\n for resource in resource_constants.item_resources:\n # double check 
there is no overlap...\n if resource in resource_constants.quantity_resources:\n raise InternalRepyError(\"Resource '\"+resource+\"' cannot be both quantity and item based!\")\n\n returned_resource_dict[resource] = set()\n\n # I need locks to protect races in accesses to some items...\n returned_resource_dict['fungible_locks'] = {}\n for init_resource in resource_constants.fungible_item_resources:\n returned_resource_dict['fungible_locks'][init_resource] = threading.Lock()\n\n returned_resource_dict['renewable_locks'] = {}\n for init_resource in resource_constants.renewable_resources:\n returned_resource_dict['renewable_locks'][init_resource] = threading.Lock()\n\n\n # I also need to track when the last update of a renewable resource occurred\n returned_resource_dict['renewable_update_time'] = {}\n\n # (Aside) JAC: I've thought about this and looked through the commit history.\n # I don't see any reason to initialize the renewable resources with the\n # current time (as was done before).\n for init_resource in resource_constants.renewable_resources:\n returned_resource_dict['renewable_update_time'][init_resource] = 0.0\n\n\n return returned_resource_dict", "def generate_auth_dict(self) -> Dict[str, str]:\n\n # api.exchange.bitcoin.com uses Basic Authentication https://api.exchange.bitcoin.com/#authentication\n message = self.api_key + \":\" + self.secret_key\n signature = base64.b64encode(bytes(message, \"utf8\")).decode(\"utf8\")\n\n return {\n \"signature\": signature\n }", "def populate_data():\n values = dict()\n values['name'] = input('Enter Your full name : ')\n values['email'] = input('Enter Your email : ')\n values['phone'] = input('Enter Your phone number : ')\n values['gender'] = input(\n 'Enter Your gender \"Male\" \"Female\" \"Other\" : ')\n values['dob'] = input('Enter Your Date of Birh : ')\n values['latitude'] = input('Enter Your latitude : ')\n values['longitude'] = input('Enter Your longitude : ')\n values['image'] = input('Enter Your image : ')\n values['social_media'] = input('Enter Your social_media : ')\n return values", "def generate_dict(self):\n # verify preferred timestamp exists in the structure...\n if not self._check_preferred_timestamps():\n raise SampleException(\"Preferred timestamp not in particle!\")\n\n # build response structure\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n\n return result", "def create_inputs(self):\n return {}", "def getInitialData(nsmapi):\r\n # Done 6-1-2020\r\n # TODO extract ids not using the regex?\r\n initData = {}\r\n\r\n url = f\"/healthcheck\"\r\n print(\"Running basic healthcheck\")\r\n healthcheckData = nsmapi.call(url, method=\"PUT\", message='{\"id\":[\"default\"]}')\r\n initData[\"healthcheck\"] = healthcheckData\r\n\r\n for i in range(20):\r\n print(f\".\", end=\"\", flush=True)\r\n time.sleep(.5)\r\n print(\"\")\r\n\r\n print(\"Getting initial sensor data\")\r\n url = \"/sensors\"\r\n basicData = json.dumps(nsmapi.call(url))\r\n dataType = url[1:].replace(\"/\", \"_\")\r\n initData[dataType] = []\r\n for id in re.findall(\"\\\"sensorId\\\":.*?, \\\"name\\\":.*?,\", basicData):\r\n if id[-1] == \",\":\r\n id = id[:-1]\r\n id = id.replace(\"\\\"\", \"\")\r\n id = id.replace(\": \", \":\")\r\n num, name = id.split(\",\")\r\n num = num.split(\":\")[-1]\r\n name = name.split(\":\")[-1]\r\n idName = 
f\"{num},{name}\"\r\n initData[dataType].append(idName)\r\n\r\n print(\"Getting initial domain data\")\r\n url = \"/domain\"\r\n basicData = json.dumps(nsmapi.call(url))\r\n dataType = url[1:].replace(\"/\", \"_\")\r\n initData[dataType] = []\r\n for id in re.findall(\"\\\"id\\\":.*?, \\\"name\\\":.*?,\", basicData):\r\n if id[-1] == \",\":\r\n id = id[:-1]\r\n id = id.replace(\"\\\"\", \"\")\r\n id = id.replace(\": \", \":\")\r\n num, name = id.split(\",\")\r\n num = num.split(\":\")[-1]\r\n name = name.split(\":\")[-1]\r\n idName = f\"{num},{name}\"\r\n initData[dataType].append(idName)\r\n\r\n policyURLs = [\r\n \"/domain/{domainId}/ipspolicies\",\r\n \"/domain/{domainId}/firewallpolicy\",\r\n \"/domain/{domainId}/connectionlimitingpolicies\",\r\n \"/domain/{domainId}/qospolicy\",\r\n \"/protectionoptionspolicy\",\r\n \"/domain/{domainId}/malwarepolicy\",\r\n \"/domain/{domainId}/policygroups\"\r\n ]\r\n\r\n print(\"Getting initial policy data\")\r\n initData[\"policy\"] = {}\r\n for domain in initData[\"domain\"]:\r\n domainId, domainName = domain.split(\",\")\r\n initData[\"policy\"][domainId] = {}\r\n for url in policyURLs:\r\n url = url.replace(\"{domainId}\", domainId)\r\n policyData = nsmapi.call(url)\r\n key = list(policyData.keys())[0]\r\n policyType = url.split(\"/\")[-1].replace(\"policy\", \"\").replace(\"policies\", \"\")\r\n initData[\"policy\"][domainId][policyType] = []\r\n for policy in policyData[key]:\r\n policy = json.dumps(policy)\r\n # pattern = \"\\\"([^\\\"]*?)(id|ID|iD|Id){0,1}(name){0,1}\\\": (.*?),\" - don't seem to work\r\n # extracted = re.findall(pattern, policy) - don'tens seem to works\r\n # initData[\"policy\"][domainId][policyType][\"full\"] = policy\r\n for polK, polV in json.loads(policy).items():\r\n if \"omain\" not in polK.lower():\r\n if \"name\" in polK.lower():\r\n name = polV\r\n elif \"id\" in polK.lower():\r\n id = polV\r\n initData[\"policy\"][domainId][policyType].append((id,name))\r\n\r\n print(\"Got Initial Data\")\r\n\r\n return initData", "def create_key ():" ]
[ "0.78241", "0.6145188", "0.6048201", "0.6043185", "0.6016839", "0.6012802", "0.5965179", "0.5900327", "0.58157974", "0.57789785", "0.5743514", "0.5737723", "0.5720123", "0.5668789", "0.563764", "0.5610418", "0.56098014", "0.5600891", "0.5594923", "0.5593127", "0.5536252", "0.55110127", "0.54786325", "0.54527366", "0.54316235", "0.54256165", "0.54244894", "0.5422683", "0.5421369", "0.54037344", "0.5395697", "0.53953326", "0.53867775", "0.53839487", "0.5381459", "0.5357579", "0.5345585", "0.5345409", "0.53449315", "0.53449315", "0.5340789", "0.53396964", "0.5336201", "0.5318443", "0.5317512", "0.52978307", "0.52849066", "0.528338", "0.52728736", "0.5266325", "0.5260731", "0.5256086", "0.52548146", "0.5253941", "0.52529037", "0.52464", "0.52422506", "0.5236014", "0.5220824", "0.5210711", "0.5208937", "0.5202383", "0.51909405", "0.51841563", "0.51815534", "0.5172001", "0.5160685", "0.5158879", "0.5158355", "0.51560944", "0.515147", "0.5149947", "0.5146575", "0.51442623", "0.5139034", "0.51343536", "0.51297903", "0.51288795", "0.5119207", "0.5118723", "0.510989", "0.5097343", "0.5094215", "0.50932676", "0.50811493", "0.5077604", "0.50742495", "0.50693893", "0.5064707", "0.50629437", "0.5062147", "0.5061902", "0.5056831", "0.50566727", "0.50539654", "0.5050237", "0.50481147", "0.50477874", "0.50472075" ]
0.5106261
81
Return a new (unsaved) shareditem object. Does not set any of the fields that would come from the Request object (i.e. ``user``).
def get_shared_object(self):
    if not self.is_valid():
        raise ValueError("get_shared_object may only be called on valid forms")

    new = SharedItem(
        object_id = force_unicode(self.target_object._get_pk_val()),
        content_type = ContentType.objects.get_for_model(self.target_object),
        share_date = datetime.datetime.now(),
    )
    return new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_shared_object(self):\n if not self.is_valid():\n raise ValueError(\"get_shared_object may only be called on valid forms\")\n\n new = SharedItem(\n content_type = ContentType.objects.get_for_model(self.target_object),\n object_id = force_unicode(self.target_object._get_pk_val()),\n share_date = datetime.datetime.now(),\n )\n \n return new", "def fromSharedItem(cls, sharedItem):\n localpart = None\n for (localpart, domain) in userbase.getAccountNames(sharedItem.store):\n break\n if localpart is None:\n raise NoSuchShare()\n for share in sharedItem.store.query(Share,\n Share.sharedItem == sharedItem):\n break\n else:\n raise NoSuchShare()\n return cls(\n shareID=share.shareID,\n localpart=localpart, domain=domain)", "def shareItem(self, sharedItem, shareID=None, interfaces=ALL_IMPLEMENTED):\n if shareID is None:\n shareID = genShareID(sharedItem.store)\n return Share(store=self.store,\n shareID=shareID,\n sharedItem=sharedItem,\n sharedTo=self,\n sharedInterfaces=interfaces)", "def create_item(self, user: User, **kwargs) -> None:", "def copy(self):\n return Object(_default_item=self._default_item, **self._items)", "def item_shared(self, item):\n self.update_item(item)", "def save_object(self, data):\n return Item(**data)", "def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item", "def cloneItemOnly( self, parent ):\n o_item = self.__class__( parent, self.o_data )\n\n return o_item", "def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})", "def create_item(item: Item):\n coll_users = data_access.get_user_collection()\n coll_items = data_access.get_items_collection()\n\n if not item.users:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"Empty user list not allowed.\")\n\n if not item.content:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"No description / content given.\")\n\n for user_name in item.users:\n if coll_users.find_one({\"name\": user_name}) is None:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n f\"User {user_name} not exists in the user list.\")\n\n item_dict = item.dict()\n item_dict[\"item_id\"] = uuid.uuid4()\n\n tm_now = datetime.datetime.now().isoformat()\n item_dict[\"status_change_date\"] = tm_now\n\n coll_items.insert_one(item_dict)", "def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item", "def add_shared_items(shared_list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(shared_list_id)\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{shared_list_id}\")", "def clone(self, userName):\n if self.store:\n obj = self.store.load_or_create({'name':self.name, 'creator':userName})\n if obj:\n return obj\n \n try:\n obj = self.create()\n for key in self._allattr([self.ID, self.CREATOR, self.CREATED_TIME, self.LAST_MODIFIED]):\n obj._setattr(key, self._getattr(key))\n 
obj.creator = userName\n return obj\n except Exception as e:\n logger.error(e.message)\n logger.error('can not clone {0}'.format(self.generic()))\n return None", "def process_item(self, item, spider):\n task = SpiderTask.objects.get(id=spider.task_id)\n dj_item = Item.objects.create(task=task, **item)\n return dj_item", "def post(self):\n data = request.json\n return UserServices(data=data).save_new_item()", "def copy(self):\n new = object.__new__(type(self))\n new.required = self.required\n new.title = self.title\n new.type = self.type\n values = self.values\n if (values is not None):\n values = (*values,)\n new.values = values\n return new", "def _create_or_update_packinglistitem(self, item_identifier, item, user, optional_attrs={}):\n try:\n packing_list_item = self.packing_list.packing_list_item_model.objects.get(\n packing_list=self.packing_list,\n item_reference=item_identifier)\n except self.packing_list.packing_list_item_model.DoesNotExist:\n try:\n optional_description = item.optional_description or ''\n except AttributeError:\n optional_description = None\n options = {\n 'requisition': item._meta.verbose_name,\n 'item_description': '{subject_identifier} ({initials}) VISIT:{visit} DOB:{dob} {optional}'.format(\n subject_identifier=item.registered_subject.subject_identifier,\n initials=item.registered_subject.initials,\n visit=item.visit_code,\n dob=item.registered_subject.dob,\n optional=optional_description,\n ),\n 'user_created': user,\n }\n options.update(**optional_attrs)\n packing_list_item = self.packing_list.packing_list_item_model.objects.create(\n packing_list=self.packing_list,\n item_reference=item_identifier,\n **options)\n return packing_list_item", "def create(cls):\n return BasketItem(code=str(uuid.uuid4()))", "def mock_item(title='Item One', author='Author One', location='Location One'):\n\n\titem_data = {'title': title, 'author': author, 'location': location}\n\n\treturn models.new_item(item_data), title, author, location", "def copy(self):\n new = object.__new__(type(self))\n new.bot = self.bot\n new.description = self.description\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = 0\n new.name = self.name\n return new", "def setup_public_reusable_item_1(self):\n\n # ensure reusable item is public\n reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n reusableitem.is_public = True\n reusableitem.save()\n\n # add a reference to this reusable item by user 2\n create_toptenlist(self, 'user_2', 2) # create a list for user 2\n reference_reusable_item(self, 'user_2', self.reusableitem_1.id, 'toptenlist_2', 0)\n\n return reusableitem", "def copy(self):\n return self.__class__(self.items, self.is_cloud)", "def process_item(self, item, spider):\n if item['id'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: {0}\".format(item))\n else:\n self.ids_seen.add(item['id'])\n\n session = Session()\n\n if 'sex' in item:\n friends = item.pop('friends')\n for friend in friends:\n try:\n session.execute(friendship.insert(), params={\"friend_a_id\": item['id'], \"friend_b_id\": friend})\n session.commit()\n except:\n session.rollback()\n continue\n item.pop('image_urls')\n pictures = item.pop('images')\n if pictures:\n item['picture'] = pictures[0]['path']\n data = User(**item)\n else:\n data = City(**item)\n\n try:\n session.add(data)\n session.commit()\n except:\n session.rollback()\n raise Exception(\n \"[ERROR]: {0} - {1}\".format(sys.exc_info()[0], sys.exc_info()[1])\n )\n finally:\n session.close()\n\n return item", "def 
create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r\n parent = get_modulestore(category).get_item(usage_key)\r\n dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)\r\n\r\n # get the metadata, display_name, and definition from the request\r\n metadata = {}\r\n data = None\r\n template_id = request.json.get('boilerplate')\r\n if template_id:\r\n clz = parent.runtime.load_block_type(category)\r\n if clz is not None:\r\n template = clz.get_template(template_id)\r\n if template is not None:\r\n metadata = template.get('metadata', {})\r\n data = template.get('data')\r\n\r\n if display_name is not None:\r\n metadata['display_name'] = display_name\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=data,\r\n metadata=metadata,\r\n system=parent.runtime,\r\n )\r\n\r\n # TODO replace w/ nicer accessor\r\n if not 'detached' in parent.runtime.load_block_type(category)._class_tags:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent.location).update_item(parent, request.user.id)\r\n\r\n return JsonResponse({\"locator\": unicode(dest_usage_key), \"courseKey\": unicode(dest_usage_key.course_key)})", "def get_or_create(cls, **kwargs):\n item = cls.query.filter_by(**kwargs).first()\n if not item:\n item = cls(**kwargs)\n db.session.add(item)\n db.session.commit()\n return item", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = True\n\t\textra['fileitem'] = linked_item\n\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'name' : linked_item.name,\n\t\t\t'fileitem' : linked_item.file,\n\t\t}", "def create(self, request, *args, **kwargs):\n # Clean up input data\n data = self.clean_data(request.data)\n\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n\n # Record the user who created this Part object\n item = serializer.save()\n item.user = request.user\n item.system = False\n\n # quantity field cannot be explicitly adjusted here\n 
item.quantity = item.item.quantity\n item.save()\n\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)", "def post(self, item):\n\n db.session.add(item)\n\n return item", "def clone_item(item):\n i = h5Item(item.text(0))\n i.path = item.path\n i.listIndex = item.dataIndex\n i.originalIndex = item.originalIndex\n i.data = item.data\n return i", "def copy(self, keep_uid=False, new_parent=None):\n obj = self.__class__(\n parent=new_parent or self.parent,\n data=self.data,\n id=self.id if keep_uid else str(uuid.uuid1()),\n )\n if new_parent or not keep_uid:\n obj.url = obj.generate_url()\n else:\n obj.url = self.url\n return obj", "def shareItem(sharedItem, toRole=None, toName=None, shareID=None,\n interfaces=ALL_IMPLEMENTED):\n warnings.warn(\"Use Role.shareItem() instead of sharing.shareItem().\",\n PendingDeprecationWarning,\n stacklevel=2)\n if toRole is None:\n if toName is not None:\n toRole = getPrimaryRole(sharedItem.store, toName, True)\n else:\n toRole = getEveryoneRole(sharedItem.store)\n return toRole.shareItem(sharedItem, shareID, interfaces)", "def copy(self):\n new = object.__new__(type(self))\n new.approximate_online_count = self.approximate_online_count\n new.approximate_user_count = self.approximate_user_count\n new.description = self.description\n new.discovery_splash_hash = self.discovery_splash_hash\n new.discovery_splash_type = self.discovery_splash_type\n new.emojis = self.emojis.copy()\n features = self.features\n if (features is not None):\n features = (*features,)\n new.features = features\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = self.id\n new.invite_splash_hash = self.invite_splash_hash\n new.invite_splash_type = self.invite_splash_type\n new.stickers = self.stickers.copy()\n new.name = self.name\n return new", "def create_item(obj: endpoint_model):\n # should this error if exists?\n new_obj = db.save(obj)\n return new_obj", "def make_borrowing(self, request):\n item = Item.objects.get(id=request.POST['item_id'])\n if not item.can_be_borrowed():\n return self.init_and_toast(\"ERROR: The item is not available\")\n\n request_obj = Request.objects.get(id=request.POST['request_id'])\n borrowing = Borrowing(user=request_obj.user, item=item, borrowing_by=request.user)\n borrowing.save()\n request_obj.borrowing = borrowing\n request_obj.save()\n return self.init_and_toast(\"The item has been borrowed succesfully\")", "def get_or_create_from_request(self, request: HttpRequest) -> Tuple['Basket', bool]:\n if not hasattr(request, 'session'):\n request.session = {}\n try:\n session_basket_id = request.session[BASKET_ID_SESSION_KEY]\n session_basket = self.get(id=session_basket_id, owner=None)\n except (KeyError, Basket.DoesNotExist):\n session_basket = None\n\n if hasattr(request, 'user') and request.user.is_authenticated:\n try:\n basket, created = self.get_or_create(owner=request.user)\n except self.model.MultipleObjectsReturned:\n # User has multiple baskets, merge them.\n baskets = list(self.filter(owner=request.user))\n basket, created = baskets[0], False\n for other in baskets[1:]:\n basket.merge(other)\n\n if session_basket:\n # Merge session basket into user basket.\n basket.merge(session_basket)\n\n if BASKET_ID_SESSION_KEY in request.session:\n # Delete session basket id from session so that it doesn't get\n # re-fetched while user is still logged in.\n del request.session[BASKET_ID_SESSION_KEY]\n else:\n basket, created = session_basket or 
self.create(), not session_basket\n request.session[BASKET_ID_SESSION_KEY] = basket.id\n\n return basket, created", "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def _create_item(self, item_id: str, data: dict) -> Pipeline:\n return Pipeline(id=item_id, **data)", "def GetNewItem(self):\n if not self.category.Value:\n cat = 'None'\n else:\n cat = self.category.Value\n \n return Entry(self.name.Value, self.username.Value, self.password.Value, \n cat, self.comments.Value)", "def copy(self):\n new = object.__new__(type(self))\n new.avatar_hash = self.avatar_hash\n new.avatar_type = self.avatar_type\n new.boosts_since = self.boosts_since\n new.flags = self.flags\n new.joined_at = self.joined_at\n new.nick = self.nick\n new.pending = self.pending\n role_ids = self.role_ids\n if (role_ids is not None):\n role_ids = (*role_ids,)\n new.role_ids = role_ids\n new.timed_out_until = self.timed_out_until\n return new", "def createNewItem(request):\n newItem = ItemSerializer(data=request.data)\n if newItem.is_valid():\n newItem.save()\n return Response(newItem.data, status=status.HTTP_201_CREATED)\n\n fail = {\n \"item\" : \"item is not valid\"\n }\n return JsonResponse(fail)", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = True\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'text' : linked_item.text,\n\t\t}", "def create_reusable_item_1(self, toptenitem_id, **kwargs):\n item_detail_url = reverse('topTenLists:TopTenItems-detail', kwargs={'pk': toptenitem_id})\n\n response = self.client.patch(item_detail_url, kwargs, format='json')\n\n try:\n newreusableitem_id = json.loads(response.content)['reusableItem']['id']\n\n self.reusableitem_1 = ReusableItem.objects.get(pk=newreusableitem_id)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ReusableItem.objects.count(), 1)\n self.client.logout()\n\n except:\n self.client.logout()\n\n return response", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = True\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'url' : linked_item.url,\n\t\t\t'priority' : item.priority.id,\n\t\t\t'delivery_notes' : linked_item.delivery_notes,\n\t\t}", "def get_object(self):\n # return Person.objects.get(user=self.request.user)\n p,c = Person.objects.get_or_create(user=self.request.user)\n return p", "def create_sample_item(name, price, data_only):\n item_info = {\n 'name': name,\n 'price': price\n }\n 
if data_only:\n return item_info\n\n else:\n item_obj = Item.objects.create(**item_info)\n return item_obj, item_info", "def _save_item(request, usage_key, data=None, children=None, metadata=None, nullout=None,\r\n grader_type=None, publish=None):\r\n store = get_modulestore(usage_key)\r\n\r\n try:\r\n existing_item = store.get_item(usage_key)\r\n except ItemNotFoundError:\r\n if usage_key.category in CREATE_IF_NOT_FOUND:\r\n # New module at this location, for pages that are not pre-created.\r\n # Used for course info handouts.\r\n store.create_and_save_xmodule(usage_key)\r\n existing_item = store.get_item(usage_key)\r\n else:\r\n raise\r\n except InvalidLocationError:\r\n log.error(\"Can't find item by location.\")\r\n return JsonResponse({\"error\": \"Can't find item by location: \" + unicode(usage_key)}, 404)\r\n\r\n old_metadata = own_metadata(existing_item)\r\n\r\n if publish:\r\n if publish == 'make_private':\r\n _xmodule_recurse(\r\n existing_item,\r\n lambda i: modulestore().unpublish(i.location),\r\n ignore_exception=ItemNotFoundError\r\n )\r\n elif publish == 'create_draft':\r\n # This recursively clones the existing item location to a draft location (the draft is\r\n # implicit, because modulestore is a Draft modulestore)\r\n _xmodule_recurse(\r\n existing_item,\r\n lambda i: modulestore().convert_to_draft(i.location),\r\n ignore_exception=DuplicateItemError\r\n )\r\n\r\n if data:\r\n # TODO Allow any scope.content fields not just \"data\" (exactly like the get below this)\r\n existing_item.data = data\r\n else:\r\n data = existing_item.get_explicitly_set_fields_by_scope(Scope.content)\r\n\r\n if children is not None:\r\n children_usage_keys = [\r\n UsageKey.from_string(child)\r\n for child\r\n in children\r\n ]\r\n existing_item.children = children_usage_keys\r\n\r\n # also commit any metadata which might have been passed along\r\n if nullout is not None or metadata is not None:\r\n # the postback is not the complete metadata, as there's system metadata which is\r\n # not presented to the end-user for editing. So let's use the original (existing_item) and\r\n # 'apply' the submitted metadata, so we don't end up deleting system metadata.\r\n if nullout is not None:\r\n for metadata_key in nullout:\r\n setattr(existing_item, metadata_key, None)\r\n\r\n # update existing metadata with submitted metadata (which can be partial)\r\n # IMPORTANT NOTE: if the client passed 'null' (None) for a piece of metadata that means 'remove it'. 
If\r\n # the intent is to make it None, use the nullout field\r\n if metadata is not None:\r\n for metadata_key, value in metadata.items():\r\n field = existing_item.fields[metadata_key]\r\n\r\n if value is None:\r\n field.delete_from(existing_item)\r\n else:\r\n try:\r\n value = field.from_json(value)\r\n except ValueError:\r\n return JsonResponse({\"error\": \"Invalid data\"}, 400)\r\n field.write_to(existing_item, value)\r\n\r\n if existing_item.category == 'video':\r\n manage_video_subtitles_save(existing_item, request.user, old_metadata, generate_translation=True)\r\n\r\n # commit to datastore\r\n store.update_item(existing_item, request.user.id)\r\n\r\n result = {\r\n 'id': unicode(usage_key),\r\n 'data': data,\r\n 'metadata': own_metadata(existing_item)\r\n }\r\n\r\n if grader_type is not None:\r\n result.update(CourseGradingModel.update_section_grader_type(existing_item, grader_type, request.user))\r\n\r\n # Make public after updating the xblock, in case the caller asked\r\n # for both an update and a publish.\r\n if publish and publish == 'make_public':\r\n def _publish(block):\r\n # This is super gross, but prevents us from publishing something that\r\n # we shouldn't. Ideally, all modulestores would have a consistant\r\n # interface for publishing. However, as of now, only the DraftMongoModulestore\r\n # does, so we have to check for the attribute explicitly.\r\n store = get_modulestore(block.location)\r\n store.publish(block.location, request.user.id)\r\n\r\n _xmodule_recurse(\r\n existing_item,\r\n _publish\r\n )\r\n\r\n # Note that children aren't being returned until we have a use case.\r\n return JsonResponse(result)", "def _mkObject(self):\n return ImmutableObject(\n store=self.store,\n hash=u'somehash',\n contentDigest=u'quux',\n content=self.store.newFilePath('foo'),\n contentType=u'application/octet-stream')", "def copy(self, **kwargs):\n instance = copy(self)\n kwargs.update({\n 'id': None,\n 'pk': None,\n })\n instance.update_from_kwargs(**kwargs)\n return instance", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = True\n\t\tchecklist_items = ChecklistItem.objects.filter(checklist = item).order_by('-order_index')\n\n\t\textra['checklist_items'] = checklist_items\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t}", "def yield_item(self, response):\n item = BrobotBotsItem()\n item.update(self.data)\n yield item", "def yield_item(self, response):\n item = BrobotBotsItem()\n item.update(self.data)\n yield item", "def _update_item(self, item, user):\n item.user_modified = user\n try:\n item.panel = item.panel\n item.item_priority = item.priority\n except AttributeError:\n pass\n item.is_packed = True\n item.save()\n return item", "def Copy(self, item):\r\n\r\n self._id = item._id\r\n self._name = item._name\r\n self._title = item._title\r\n self._isGroup = item._isGroup\r\n self._breakColumn = item._breakColumn\r\n self._rect = item._rect\r\n self._font = item._font\r\n self._textColour = item._textColour\r\n self._bitmap = item._bitmap\r\n self._description = item._description\r\n self._rowPos = item._rowPos\r\n self._colPos = item._colPos\r\n self._window = item._window", "def create(self, request, parsed_request_fields, extra_fields,\n local_site=None, *args, **kwargs):\n return self._create_or_update(request, parsed_request_fields,\n extra_fields, None, local_site)", "def get_item_data(item):\n\n return OnedriveItem(\n id=item.get('id'),\n name=item.get('name'),\n web_url=item.get('webUrl'),\n 
created_by=item.get('createdBy')\n ).__dict__", "def get_item(self, id: str, user: User) -> Optional[T]:", "def create_module_item(self, module_item, **kwargs):\n\n unrequired_types = [\"ExternalUrl\", \"Page\", \"SubHeader\"]\n\n if isinstance(module_item, dict) and \"type\" in module_item:\n # content_id is not required for unrequired_types\n if module_item[\"type\"] in unrequired_types or \"content_id\" in module_item:\n kwargs[\"module_item\"] = module_item\n else:\n raise RequiredFieldMissing(\n \"Dictionary with key 'content_id' is required.\"\n )\n else:\n raise RequiredFieldMissing(\"Dictionary with key 'type' is required.\")\n\n response = self._requester.request(\n \"POST\",\n \"courses/{}/modules/{}/items\".format(self.course_id, self.id),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def deriveItem(self, url):\n item_id = url.split('/')[-1]\n # Fetch item and area\n version = 'latest' #TODO: honour the version in the xml payload\n content_tool = getToolByName(self.context, 'content')\n item = content_tool.getRhaptosObject(item_id, version)\n area = self.context\n # We create a copy that we want to clean up later, let's track the id\n to_delete_id = area.generateUniqueId()\n area.invokeFactory(id=to_delete_id, type_name=item.portal_type)\n obj = area._getOb(to_delete_id)\n\n # item must be checked out to area before a fork is possible\n obj.setState('published')\n obj.checkout(item.objectId)\n\n # Do the fork\n forked_obj = obj.forkContent(license='', return_context=True)\n forked_obj.setState('created')\n forked_obj.setGoogleAnalyticsTrackingCode(None)\n\n # remove all roles except those of the author\n forked_obj.resetOptionalRoles()\n # should not be necessary...\n forked_obj.deleteCollaborationRequests()\n owner_id = forked_obj.Creator()\n for user_id in forked_obj.getCollaborators():\n if user_id != owner_id:\n forked_obj.removeCollaborator(user_id)\n\n # Delete temporary copy\n if to_delete_id:\n area.manage_delObjects(ids=[to_delete_id])\n return forked_obj", "def __init__(self, unique_user_ids, unique_item_ids, **kwargs):\n super().__init__(**kwargs)\n self.user_model = EntityModel(unique_user_ids)\n self.item_model = EntityModel(unique_item_ids)\n self.logger = logging.getLogger()", "def create_sample_order_item(item, quantity, data_only):\n order_item_info = {\n 'item': item.pk,\n 'quantity': quantity\n }\n if data_only:\n return order_item_info\n\n else:\n order_item_obj = OrderItem.objects.create(\n item=item,\n quantity=quantity\n )\n order_item_info[\"id\"] = order_item_obj.id\n return order_item_obj, order_item_info", "def perform_create(self, serializer):\n item = models.ProfileItem.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(profile_item=item)", "def clean_copy(self) -> \"StorableObject\":\n return StorableObject(\n id=self.id, data=self.data, tags=self.tags, description=self.description\n )", "def __init__(self, **kwargs):\n self.item = Item(**kwargs)\n self._working_dir = None", "def item_duplicate():\n return {'name':'chair',\n 'value':300}", "def __load_item(item):\n\n itm = dtf.core.item.Item()\n\n itm.name = item.name\n itm.type = item.type\n\n itm.install_name = get_item_attrib(item, \"install_name\")\n itm.local_name = None\n itm.author = get_item_attrib(item, \"author\")\n itm.about = get_item_attrib(item, \"about\")\n itm.version = get_item_attrib(item, \"version\")\n\n return itm", 
"def getItem(self, itemID, no_html=False):\n data = self._client.Item.find(int(itemID))\n item = self.makeDict(data, no_html=no_html)\n return item", "def newRequest(self):\n return Request( )", "def process_item(self, item, spider):\n\n session = self.Session()\n\n try:\n CreateOrUpdate().create_or_update(item=item, session=session)\n session.commit()\n except:\n # undo in case of errors\n session.rollback()\n raise\n finally:\n session.close()\n\n return item", "def mocked_item(mocked_session):\n test_item = mock.Mock()\n test_item.session = mocked_session\n return test_item", "def make_reusable_item_public(id):\n reusableitem = ReusableItem.objects.get(pk=id)\n reusableitem.is_public = True\n reusableitem.save()\n\n return reusableitem", "def create(self, validated_data):\n\t\tvalidated_data['user'] = self.context['request'].user\n\t\treturn super(FullBoxSerializer, self).create(validated_data)", "def ajax_user(self, user):\r\n return UserTableItem(user, self.type, self.cells, self.container_name,\r\n self.editable)", "def new(self):\n return get_data_for_new_edit(dict(request.GET))", "def get_user_noreq(self, request):\n item = Item.objects.get(id=request.POST['item_id'])\n target_user = User.objects.filter(email=request.POST['email'])\n if not target_user.exists():\n # In this case we don't want to return to the initial page\n return JsonResponse({\n 'msg': \"ERROR: The user doesn't exist\"\n })\n if not item.can_be_borrowed():\n return self.init_and_toast(\"ERROR: The item is not available\")\n\n borrowing = Borrowing(user=target_user.first(), item=item, borrowing_by=request.user)\n borrowing.save()\n return self.init_and_toast(\"The item has been borrowed succesfully\")", "def process_item(self, item, spider):\n session = self.Session()\n real = Reals(**item)\n\n try:\n session.add(real)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item", "def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result", "def load(self, request, item, linked_item, extra):\n\t\tfetch_comments = Comment.objects.filter(item = item).order_by('date_time')\n\t\tgravatar_queryset(fetch_comments)\n\n\t\textra['comments'] = fetch_comments\n\t\textra['buttons_update'] = True\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'text' : linked_item.text,\n\t\t\t'priority' : item.priority.id,\n\t\t\t'delivery_notes' : linked_item.delivery_notes,\n\t\t}", "def get_or_create_from_request(self, request, user=None):\n request_user = None\n if hasattr(request, 'user') and request.user and request.user.is_authenticated:\n request_user = request.user\n\n if not request_user and (not user or not user.is_authenticated):\n return None\n\n session_user = user if user else request_user\n tunnistamo_session_id = request.session.get(\"tunnistamo_session_id\")\n\n tunnistamo_session = None\n if tunnistamo_session_id:\n try:\n tunnistamo_session = self.get(\n pk=tunnistamo_session_id,\n user=session_user,\n )\n except TunnistamoSession.DoesNotExist:\n pass\n\n if not tunnistamo_session:\n tunnistamo_session = self.create(\n user=session_user,\n created_at=now(),\n )\n request.session[\"tunnistamo_session_id\"] = str(tunnistamo_session.id)\n\n return tunnistamo_session", "def itemFromProxy(obj):\n return object.__getattribute__(obj, '_sharedItem')", "def new(self, *args, **kwargs):\n return flattrclient.things.Thing(session=self._session, **kwargs)", "def process_item(self, item, 
spider):\n if item['name'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: %s\" % item)\n else:\n self.ids_seen.add(item['name'])\n return item #return the item", "def share_sharedlist(shared_list_id):\n\n email = request.form[\"email\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(list_id)\n shared_user = User.query.filter_by(email=email).first()\n\n if not shared_user:\n flash(\"No user found. Please enter a valid email.\")\n return redirect(f\"/shared-lists/{shared_list_id}\")\n\n shared_user.shared_lists.append(to_do_list)\n flash(f\"This list has been shared with {shared_user.name}!\")\n db.session.add(shared_user)\n db.session.commit()\n\n return redirect(f\"/shared-lists/{shared_list_id}\")", "def __init__(self, sharedItem, sharedInterfaces, shareID):\n rself = _really(self)\n rself._sharedItem = sharedItem\n rself._shareID = shareID\n rself._adapterCache = {}\n # Drop all duplicate shared interfaces.\n uniqueInterfaces = list(sharedInterfaces)\n # XXX there _MUST_ Be a better algorithm for this\n for left in sharedInterfaces:\n for right in sharedInterfaces:\n if left.extends(right) and right in uniqueInterfaces:\n uniqueInterfaces.remove(right)\n for eachInterface in uniqueInterfaces:\n if not eachInterface.providedBy(sharedItem):\n impl = eachInterface(sharedItem, None)\n if impl is not None:\n rself._adapterCache[eachInterface] = impl\n rself._sharedInterfaces = uniqueInterfaces\n # Make me look *exactly* like the item I am proxying for, at least for\n # the purposes of adaptation\n # directlyProvides(self, providedBy(sharedItem))\n directlyProvides(self, uniqueInterfaces)", "def copy(self):\n r = PredictionJobRequest()\n r.__dict__.update(self.__dict__)\n\n return r", "def test_new_share(self):\n \n test_user_with_checkpoint = self.create_saved_test_user_with_checkpoint()\n another_test_user_to_share = self.create_saved_test_user()\n \n data = {\"user_id\": test_user_with_checkpoint.user_obj.id,\n \"to_user_id\": another_test_user_to_share.user_obj.id,\n \"signature\": gen_signature(\"put\",\n \"share\",\n gen_api_key(test_user_with_checkpoint.user_obj.access_token, \n test_user_with_checkpoint.user_obj.id)),\n \"user_checkpoint_id\": test_user_with_checkpoint.user_checkpoint_obj.id\n }\n \n resp = self.client.put(\"/share/\", data=data)\n assert \"ok\" in resp.data\n assert not get_share_w_attr(test_user_with_checkpoint.user_obj, \n another_test_user_to_share.user_obj, \n test_user_with_checkpoint.user_checkpoint_obj) is None", "def copy(self):\n cls = type(self)\n # Create a new instance without calling __init__: parameters are\n # different depending on the class.\n new_box = cls.__new__(cls)\n # Copy attributes\n new_box.__dict__.update(self.__dict__)\n return new_box", "def copy(self) -> ItemVariant:\n return ItemVariant(\n self.pak_id,\n self.editor,\n self.vbsp_config,\n self.editor_extra.copy(),\n self.authors.copy(),\n self.tags.copy(),\n self.desc,\n self.icons.copy(),\n self.ent_count,\n self.url,\n self.all_name,\n self.all_icon,\n self.source,\n )", "def copy(self):\n return TodoList(self)", "def get_or_add(self, *args, **kwargs):\n\n key = LazyModelObject.get_identifier(*args, **kwargs)\n try:\n return self[key]\n except KeyError:\n item = LazyModelObject(*args, **kwargs)\n if not item:\n item = None\n self[key] = item\n return item", "def _create_item(self, parent_location, category, display_name, **kwargs):\n return ItemFactory.create(\n 
parent_location=parent_location,\n category=category,\n display_name=display_name,\n publish_item=False,\n user_id=self.user.id,\n **kwargs\n )", "def create(cls, user_id, song_id):\n entity = cls(\n user_id=user_id,\n song_id=song_id,\n )\n entity.put()\n\n return entity", "def add_item():\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = Item()\n # First we populate the new item.\n item.category_id = request.form['categoryId']\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n # Now let's pull its category.\n category = session.query(Category).filter_by(id=item.category_id).one()\n # And make sure they're properly linked.\n item.category = category\n session.add(item)\n session.flush()\n id = item.id\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state'], 'id': id}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def bookmark(user_id, item_id):\n Bookmark.objects.get_or_create(user=User.objects.get(pk=user_id),\n item=Item.objects.get(pk=item_id))", "def get_object(self,pk):\n # return Person.objects.get(user=self.request.user)\n p,c = Person.objects.get_or_create(user_id=pk)\n return p", "def make_prefetched_get_item(\n item: Item,\n refetch_getter: ItemGetter = strongly_consistent_get_item,\n *,\n nicename: str = DEFAULT_ITEM_NAME,\n):\n used = False\n\n def prefetched_get_item(table, key: ItemKey) -> Item:\n nonlocal used\n\n if not used:\n used = True\n return item\n return _nicename_getter(nicename, refetch_getter)(table, key)\n\n return prefetched_get_item", "def create(self, request, *args, **kwargs):\n response = super().create(request, *args, **kwargs)\n profile = response.data\n user_name = profile.get(\"username\")\n cache.set(f\"{USER_PROFILE_PREFIX}{user_name}\", profile)\n return response", "def test_new_item(self):\n\n\t\titem_id = mock_item()[0]\n\t\tself.assertEqual(item_id, 1)", "def make(self, item):\n self.name = item.get(\"name\", \"\")\n self.description = item.get(\"description\", \"\")\n self.type = item.get(\"type\", \"filler\")\n if not isinstance(self.type, str) or self.type is None:\n self.usable = NotUsable\n elif len(self.type) > 1:\n self.set_usable(self.type)\n else:\n self.usable = NotUsable" ]
[ "0.7739369", "0.64699394", "0.6046109", "0.5965286", "0.587094", "0.5790064", "0.5787484", "0.5744283", "0.5732211", "0.56390357", "0.5534467", "0.5512519", "0.5505798", "0.5461782", "0.53700083", "0.53373706", "0.53137255", "0.5306573", "0.5269624", "0.5267749", "0.52600294", "0.52524054", "0.5245564", "0.52407694", "0.52369905", "0.52321607", "0.5224713", "0.52160347", "0.52156115", "0.5203534", "0.51923496", "0.51675695", "0.5161911", "0.5160178", "0.5152076", "0.51370096", "0.5122748", "0.51085126", "0.51064485", "0.507895", "0.50777644", "0.5077542", "0.5073329", "0.50648725", "0.50590813", "0.505698", "0.5055004", "0.505323", "0.50451237", "0.50319207", "0.5029856", "0.5014978", "0.5014978", "0.5012142", "0.50036925", "0.4998741", "0.49939844", "0.49858364", "0.4974964", "0.49650225", "0.49609295", "0.49557462", "0.4947689", "0.49422434", "0.49298403", "0.49294886", "0.491417", "0.49115574", "0.49041942", "0.49030635", "0.49014556", "0.48898277", "0.48841584", "0.48830068", "0.486152", "0.48585996", "0.48406407", "0.48392066", "0.48349586", "0.4834847", "0.48335093", "0.48313612", "0.4822652", "0.48216453", "0.481881", "0.48184735", "0.48133183", "0.481279", "0.4804062", "0.4799658", "0.47908622", "0.47897124", "0.478743", "0.47814462", "0.47733474", "0.47726592", "0.47659373", "0.47657418", "0.47630876", "0.47618014" ]
0.772191
1
Generate a dict of security data for "initial" data.
def generate_object_data(self): object_dict = { 'content_type' : str(self.target_object._meta), 'object_id' : str(self.target_object._get_pk_val()), } return object_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_security_data(self):\n timestamp = int(time.time())\n security_dict = {\n 'content_type': str(self.target_object._meta),\n 'object_pk': str(self.target_object._get_pk_val()),\n 'timestamp': str(timestamp),\n 'security_hash': self.initial_security_hash(timestamp),\n }\n return security_dict", "def _get_data(self):\n data = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # skip for factories for now\n continue\n value = getattr(self, name)\n raw_value = field.to_raw(value)\n if isinstance(field, fields.Secret):\n data[f\"__{name}\"] = raw_value\n else:\n data[name] = raw_value\n\n return data", "def _get_user_data(self):\n return {\"key\": self._key}", "def get_initial_author_dict():\n adict = {}\n try:\n ah = run_sql(\"select aterm,hitlist from rnkAUTHORDATA\")\n for (a, h) in ah:\n adict[a] = deserialize_via_marshal(h)\n return adict\n except:\n register_exception(prefix=\"could not read rnkAUTHORDATA\", alert_admin=True)\n return {}", "def safe_data(self):\r\n hide = ['_password', 'password', 'is_admin', 'api_key']\r\n return dict(\r\n [(k, v) for k, v in dict(self).iteritems() if k not in hide]\r\n )", "def generate_random_data() -> dict:\n data = {\n \"_pl\": {\n \"userId\": uuid.uuid4().__str__(),\n \"sensorValue\": random.random(),\n \"sensorId\": \"\".join(random.choices(string.ascii_lowercase + string.digits, k=5))\n + \"-\"\n + \"\".join(random.choices(string.ascii_lowercase + string.digits, k=10))\n + \"-\"\n + \"\".join(random.choices(string.ascii_lowercase + string.digits, k=10))\n }\n }\n return data", "def get_data_to_create_object(self):\n return {}", "def get_initial(self):\n modelo = self.get_object()\n perm_list = [perm.codename for perm in list(modelo.permissions.all())]\n initial = {'perms_proyecto': perm_list, 'perms_sprint': perm_list, 'perms_userstory': perm_list,\n 'perms_flujo': perm_list}\n return initial", "def prepare_student_data(self) -> dict:\n self._filename_pre_data()\n empty_student = {}\n empty_student[\"scoreTimestamp\"] = \"N/A\"\n for i in self.draft_out:\n empty_student[i] = \"N/A\"\n for i in self.pre_data:\n empty_student[i] = self.pre_data[i]\n self.pre_data = empty_student", "def get_data(self):\n self.data = dict()\n # list to save all the attributes we are going to create\n self.attr = []\n # list to save all the groups available in the incomming input\n self.groups.extend(self.values.keys())\n # Grouping\n self.parse_data()", "def get_data_extra(self, initial):\n extra = {\n 'distance':'10',\n 'latitude':'0',\n 'longitude':'1'\n }\n return dict(initial.items() + extra.items())", "def get_initial(self):\n initial = super(InterventionCreate, self).get_initial()\n infrastructure = self.on_infrastucture()\n signage = self.on_signage()\n if infrastructure:\n # Create intervention on an infrastructure\n initial['infrastructure'] = infrastructure\n elif signage:\n # Create intervention on a signage\n initial['signage'] = signage\n return initial", "def __initializeData():\n\tdata = OrderedDict()\n\tdata['Saved_LIVE'] = False\n\tdata['Saved_POST'] = False\n\tdata['Time_Written_POST'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\tdata['Time_Written_LIVE'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\treturn data", "def generate(self) -> Dict[str, Any]:\n raise NotImplementedError", "def _generate_voter_in_dict(id: bytes, timestamp: int, prep: 'Prep') -> dict:\n voter_in_dict = {\n \"id\": '0x' + bytes.hex(id),\n \"timestamp\": timestamp,\n \"address\": 
str(prep.address),\n \"name\": prep.name,\n \"amount\": prep.delegated\n }\n return voter_in_dict", "def build_private_data(self, job, private_data_dir):\n private_data = {'credentials': {}}\n for credential in job.credentials.prefetch_related('input_sources__source_credential').all():\n # If we were sent SSH credentials, decrypt them and send them\n # back (they will be written to a temporary file).\n if credential.has_input('ssh_key_data'):\n private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')\n if credential.has_input('ssh_public_key_data'):\n private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')\n\n return private_data", "def Secure(self,passphrase=None,public_attributes=[]):\n\n\t\tif passphrase == None:\n\t\t\treturn self.Dictify()\n\t\telse:\n\t\t\tself.data = Encrypting.Symmetric.Encrypt(json.dumps(self.Dictify()).encode('utf-8'),passphrase).decode('utf-8')\n\t\t\t\n\t\t#secure data and dictify\n\t\tmy_secure_dict = self.Dictify()\n\n\t\t#new obfuscated obj\n\t\tnew_me = {'data':my_secure_dict['data']}\n\n\t\tfor pub_att in public_attributes:\n\t\t\tnew_me[pub_att] = my_secure_dict[pub_att]\n\n\t\treturn new_me", "def _build_identity_dict(mail, display_name, given_name, surname):\r\n meta_dict = {'Shib-Identity-Provider': IDP,\r\n 'REMOTE_USER': REMOTE_USER}\r\n if display_name is not None:\r\n meta_dict['displayName'] = display_name\r\n if mail is not None:\r\n meta_dict['mail'] = mail\r\n if given_name is not None:\r\n meta_dict['givenName'] = given_name\r\n if surname is not None:\r\n meta_dict['sn'] = surname\r\n return meta_dict", "def build_private_data(self, project_update, private_data_dir):\n private_data = {'credentials': {}}\n if project_update.credential:\n credential = project_update.credential\n if credential.has_input('ssh_key_data'):\n private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')\n return private_data", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def valid_data():\n return dict(\n id=str(uuid4()),\n created_at=1559933807392,\n name='my project',\n description='a good project',\n status='in-progress'\n )", "def _yamlData(self):\n data = dict([(key, value)\n for key, value in self.__dict__.iteritems()\n if ((key in self._yamlAttributeKeys)\n and (key not in self._yamlSpeciallyHandledAttributes))])\n data.update(self._preservedExtraAttributes)\n return data", "def get_dictionary(self):\n data = {\n \"user_first_name\": self.user.first_name,\n \"user_last_name\": self.user.last_name,\n }\n dct = provider.Provider.get_dictionary(self)\n dct.update(data)\n return dct", "def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen': data.hlen,\n 'plen': data.plen,\n 'oper': data.oper,\n 'sha': data.sha,\n 'spa': data.spa,\n 'tha': data.tha,\n 'tpa': data.tpa,\n 'payload': cls._make_payload(data),\n }", "def initial_security_hash(self, timestamp):\n\n initial_security_dict = {\n 'content_type': str(self.target_object._meta),\n 'object_pk': 
str(self.target_object._get_pk_val()),\n 'timestamp': str(timestamp),\n }\n return self.generate_security_hash(**initial_security_dict)", "def get_initial_inputs(self) -> Dict[str, ValueType]:\n if self.const_inputs:\n return self.const_inputs.copy() # Clone predefined\n return {} # Nothing set yet", "def get_data(self):\n data = {}\n _priv = self.get('_private', [])\n\n def check_data(v):\n if isinstance(v, Yaco):\n v = v.get_data()\n elif isinstance(v, list):\n v = [check_data(x) for x in v]\n return v\n\n for k in list(self.keys()):\n if k in _priv:\n continue\n if isinstance(k, (str)) and k and k[0] == '_':\n continue\n # print self.keys()\n # print k, 'x' * 30\n data[k] = check_data(self[k])\n return data", "def transform_credentials(self, data: Dict, **kwargs) -> Dict:\r\n name = data.pop(\"name\")\r\n return_data = {name: data}\r\n return return_data", "def get_initial_data(self):\r\n data = {}\r\n for name, field in self.fields.items():\r\n if hasattr(field, 'widget') and 'ng-model' in field.widget.attrs:\r\n data[name] = self.initial and self.initial.get(name) or field.initial\r\n return data", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", "def pre_security_group_create(self, resource_dict):\n pass", "def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord", "def dictOfRandomVariables(self):\n return dict()", "def __dict__(self):\r\n result = {}\r\n result['block_type'] = 'register'\r\n result['prev_hash'] = base64.b64encode(self.prev_hash).decode()\r\n result['timestamp'] = self.time\r\n result['user_id'] = self.user_id\r\n result['public_key'] = base64.b64encode(self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)).decode()\r\n return result", "def get_initial(self):\n initial = {}\n\n if self.kwargs.get('mode', None):\n filename = \"{}.txt\".format(self.kwargs['mode'])\n filepath = os.path.join(settings.BASE_DIR, 'demo_datas', filename)\n if os.path.exists(filepath):\n with io.open(filepath, 'r', encoding='utf-8') as fp:\n initial['foo'] = fp.read()\n\n return initial", "def _collect_data(self):\n data = {\n \"K\": self.K,\n \"root\": self.root\n }\n return data", "def populate_initial_valid_metadata(self):\n pass", "def default_start(self, data):\n return {}", "def default_start(self, data):\n return {}", "def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata", "def credentials(self) -> Mapping:", "def get_initial_data(self, removed=('billing_country_code', )):\n initial = getattr(self, 'initial_data', None) or {}\n for ea in removed:\n initial.pop(ea, None)\n if not initial:\n return 
initial\n test_data = MultiValueDict()\n test_data.update(initial)\n self.test_data = test_data\n return test_data", "def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]", "def _pre_construct(self, data):\n logging.info(\"pre constructing (enter)\")\n self.ids = collections.defaultdict(set)\n self.collecting = True\n pre_construct_data = self.construct(data)\n self.collecting = False\n logging.info(\"pre constructing (exit)\")\n return pre_construct_data", "def example_data():\n\n User.create_user(\"Kate\", \"longpass\", None)\n User.create_user(\"Long\", \"regularpass\", None)\n User.create_user(\"Critter\", \"shortpass\", None)", "def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)", "def _get_post_data(self, random_str):\n return {\n 'root_domain': '{0}.oregonstate.com'.format(random_str),\n 'soa_primary': 'ns1.oregonstate.com',\n 'soa_contact': 'noc.oregonstate.com',\n 'nameserver_1': 'ns1.oregonstate.com',\n 'ttl_1': '1234'\n }", "def prepare_data(self):", "def random_init(constr=None):\n if constr is not None:\n pass\n else:\n constr = {}\n if \"PERIODS\" in constr.keys():\n periods = constr[\"PERIODS\"]\n else:\n periods = np.random.randint(2, 20)\n if \"AGENTS\" in constr.keys():\n agents = constr[\"AGENTS\"]\n else:\n agents = np.random.randint(100, 5000)\n if \"SEED\" in constr.keys():\n seed = constr[\"SEED\"]\n else:\n seed = np.random.randint(1000, 10000)\n if \"SHARE\" in constr.keys():\n share = constr[\"SHARE\"]\n else:\n share = np.random.uniform(0.1, 0.8)\n if \"FILE\" in constr.keys():\n file = constr[\"FILE\"]\n else:\n file = str(uuid.uuid4()).upper().replace(\"-\", \"\")[0:8]\n\n init_dict = {\"SIMULATION\": {}, \"PARAMS\": {}, \"DIST\": {}}\n\n init_dict[\"SIMULATION\"][\"periods\"] = periods\n init_dict[\"SIMULATION\"][\"agents\"] = agents\n init_dict[\"SIMULATION\"][\"share\"] = share\n init_dict[\"SIMULATION\"][\"seed\"] = seed\n init_dict[\"SIMULATION\"][\"file\"] = file\n\n init_dict[\"PARAMS\"][\"alpha\"] = np.random.normal(1, 0.25)\n init_dict[\"PARAMS\"][\"theta\"] = np.random.normal(0.1, 0.025)\n\n init_dict[\"DIST\"][\"beta\"] = np.random.normal(0.75, 0.1)\n init_dict[\"DIST\"][\"mu\"] = np.random.normal(0.5, 0.1)\n\n print_dict(init_dict)\n\n return init_dict", "def get_static_user_data():\r\n import os\r\n\r\n import yaml\r\n from legion_test.profiler_loader import CREDENTIAL_SECRETS_ENVIRONMENT_KEY\r\n secrets = os.getenv(CREDENTIAL_SECRETS_ENVIRONMENT_KEY)\r\n if not secrets:\r\n raise Exception(\r\n 'Cannot get secrets - {} env variable is not set'.format(CREDENTIAL_SECRETS_ENVIRONMENT_KEY))\r\n\r\n if not os.path.exists(secrets):\r\n raise Exception('Cannot get 
secrets - file not found {}'.format(secrets))\r\n\r\n with open(secrets, 'r') as stream:\r\n data = yaml.load(stream)\r\n\r\n static_user = data['dex']['config']['staticPasswords'][0]\r\n return {\"login\": static_user['email'], \"password\": static_user['password']}", "def mock_valid_data_without_security_code():\n return {\n \"CreditCardNumber\": \"123454567890123456\",\n \"CardHolder\": \"Test Name\",\n \"ExpirationDate\":\n (dt.datetime.now() + dt.timedelta(hours=1)).isoformat(),\n \"Amount\": 100\n }", "def build_private_data(self, ad_hoc_command, private_data_dir):\n # If we were sent SSH credentials, decrypt them and send them\n # back (they will be written to a temporary file).\n creds = ad_hoc_command.credential\n private_data = {'credentials': {}}\n if creds and creds.has_input('ssh_key_data'):\n private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')\n if creds and creds.has_input('ssh_public_key_data'):\n private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')\n return private_data", "def _get_app_data(self):\n lti = LTI(self.request, self.kwargs[\"uuid\"])\n lti.verify()\n\n app_data = None\n if lti.is_student:\n cache_key = \"app_data|{model:s}|{domain:s}|{context:s}|{resource!s}\".format(\n model=self.model.__name__,\n domain=lti.get_consumer_site().domain,\n context=lti.context_id,\n resource=lti.resource_id,\n )\n\n app_data = cache.get(cache_key)\n permissions = {\"can_access_dashboard\": False, \"can_update\": False}\n\n if not app_data:\n resource = get_or_create_resource(self.model, lti)\n permissions = {\n \"can_access_dashboard\": lti.is_instructor or lti.is_admin,\n \"can_update\": (lti.is_instructor or lti.is_admin)\n and resource.playlist.lti_id == lti.context_id,\n }\n app_data = {\n \"modelName\": self.model.RESOURCE_NAME,\n \"resource\": self.serializer_class(resource).data if resource else None,\n \"state\": \"success\",\n \"sentry_dsn\": settings.SENTRY_DSN,\n \"environment\": settings.ENVIRONMENT,\n \"release\": settings.RELEASE,\n \"static\": {\"svg\": {\"plyr\": static(\"svg/plyr.svg\")}},\n }\n if lti.is_student:\n cache.set(cache_key, app_data, settings.APP_DATA_CACHE_DURATION)\n\n if app_data[\"resource\"] is not None:\n try:\n locale = react_locale(lti.launch_presentation_locale)\n except ImproperlyConfigured:\n locale = \"en_US\"\n\n # Create a short-lived JWT token for the video\n jwt_token = AccessToken()\n jwt_token.payload.update(\n {\n \"session_id\": str(uuid.uuid4()),\n \"context_id\": lti.context_id,\n \"resource_id\": str(lti.resource_id),\n \"roles\": lti.roles,\n \"course\": lti.get_course_info(),\n \"locale\": locale,\n \"permissions\": permissions,\n \"maintenance\": settings.MAINTENANCE_MODE,\n }\n )\n try:\n jwt_token.payload[\"user_id\"] = lti.user_id\n except AttributeError:\n pass\n\n app_data[\"jwt\"] = str(jwt_token)\n\n return app_data", "def uncleanable():\n data = attrdict.AttrDict()\n data.backup_ids = set()\n data.image_ids = set()\n data.keypair_ids = set()\n data.server_ids = set()\n data.nodes_ids = set()\n data.chassis_ids = set()\n data.snapshot_ids = set()\n data.transfer_ids = set()\n data.volume_ids = set()\n return data", "def generate_auth_dict_ws(self,\n nonce: int):\n return {\n \"algo\": \"HS256\",\n \"pKey\": str(self.api_key),\n \"nonce\": str(nonce),\n \"signature\": hmac.new(self.secret_key.encode('utf-8'),\n str(nonce).encode('utf-8'),\n hashlib.sha256).hexdigest()\n }", "def build_init_payload(self) -> dict:\n expectation_suites: 
list[ExpectationSuite] = [\n self._data_context.get_expectation_suite(expectation_suite_name)\n for expectation_suite_name in self._data_context.list_expectation_suite_names()\n ]\n\n # <WILL> 20220701 - ValidationOperators have been deprecated, so some init_payloads will not have them included\n validation_operators = None\n if hasattr(self._data_context, \"validation_operators\"):\n validation_operators = self._data_context.validation_operators\n\n init_payload = {\n \"platform.system\": platform.system(),\n \"platform.release\": platform.release(),\n \"version_info\": str(sys.version_info),\n \"datasources\": self._data_context.project_config_with_variables_substituted.datasources,\n \"stores\": self._data_context.stores,\n \"validation_operators\": validation_operators,\n \"data_docs_sites\": self._data_context.project_config_with_variables_substituted.data_docs_sites,\n \"expectation_suites\": expectation_suites,\n \"dependencies\": self._get_serialized_dependencies(),\n }\n\n return init_payload", "def get_post_data(self, random_str):\n return {\n 'root_domain': '{0}.{0}.oregonstate.edu'.format(\n random_label() + random_str),\n 'soa_primary': 'ns1.oregonstate.edu',\n 'soa_contact': 'noc.oregonstate.edu',\n 'nameserver_1': 'ns1.oregonstate.edu',\n 'nameserver_2': 'ns2.oregonstate.edu',\n 'nameserver_3': 'ns3.oregonstate.edu',\n 'ttl_1': random_byte(),\n 'ttl_2': random_byte(),\n 'ttl_3': random_byte(),\n }", "def create_system_data():\n system_data = dict()\n system_data['system'] = dict()\n system_data['system']['primary'] = dict()\n system_data['system']['primary']['controllers'] = dict()\n system_data['system']['primary']['controllers']['re0'] = dict()\n system_data['system']['primary']['controllers']['re0']['hostname'] = 'abc'\n system_data['system']['primary']['controllers']['re0']['mgt-ip'] = '1.1.1.1'\n system_data['system']['primary']['controllers']['re0']['osname'] = 'Paragon'\n system_data['system']['primary']['name'] = 'abc'\n system_data['system']['primary']['model'] = 'Paragon'\n system_data['system']['primary']['make'] = 'Calnex'\n system_data['system']['primary']['server-ip'] = '1.1.1.2'\n system_data['system']['primary']['osname'] = 'Paragon'\n return system_data", "def initMetadata(self):\n\n if not 'flags' in self.metadata:\n\n self.metadata['flags'] = {}\n\n if not 'uidvalidity' in self.metadata:\n\n\n self.metadata['uidvalidity'] = random.randint(1000000, 9999999)\n\n if not 'uids' in self.metadata:\n\n self.metadata['uids'] = {}\n\n if not 'uidnext' in self.metadata:\n\n self.metadata['uidnext'] = 1", "def get_new_user_data(cleartext_password):\n new_user_data_dict = {}\n\n new_user_data_dict['unique_user_id'] = uuid.uuid4()\n\n cleartext_password_unicode = cleartext_password.decode('utf-8')\n session_hashed_password = get_session_hash_password(cleartext_password_unicode)\n new_user_data_dict['user_salt'] = base64.b64encode(get_new_8_salt())\n new_user_data_dict['keyset'] = get_new_rsa_keyset(session_hashed_password)\n\n return new_user_data_dict", "def _make_context():\n return {'User': User, 'CreditCard': CreditCard, 'Transaction': Transaction, 'db': db, 'jsonify':jsonify}", "def get_initial(self):\n initial = {'proyecto': self.get_proyecto()}\n return initial", "def build_private_data(self, instance, private_data_dir):", "def student_view_data(self):\n def get_student_profile_data():\n # pylint: disable=no-member\n \"\"\"\n Returns profile data for all students on the course.\n \"\"\"\n try:\n regexp_string = 
self.regexp_from_users_included_email(self.users_included_email)\n re.compile(regexp_string)\n users = self.students_for_course(regexp_string)\n except:\n log.info(\"regexp is invalid: '%s'\", regexp_string)\n users = []\n\n for user in users:\n student_id = anonymous_id_for_user(user, self.course_id)\n profile = user.profile\n\n vip = self.get_vip(user)\n image_url = None\n if vip:\n image_url = \"https://my.imd.org/api/profile/{}/profile-picture-header\".format(vip)\n else:\n if self.is_course_staff:\n image_url = self.runtime.local_resource_url(self, 'public/images/profile-picture-header-no-vip.gif')\n else:\n image_url = self.runtime.local_resource_url(self, 'public/images/profile-picture-header.gif')\n\n cohort_name = None\n if (self.is_course_cohorted(self.course_id)):\n cohort_name = self.get_cohort(user, self.course_id).name\n\n yield {\n 'student_id': student_id,\n 'username': user.username,\n 'fullname': profile.name,\n 'vip': vip,\n 'image_url': image_url,\n 'email': user.email,\n 'cohort_name': cohort_name,\n }\n\n return {\n 'student_profile_list': list(get_student_profile_data()),\n 'display_name': self.display_name,\n 'username': self.logged_in_username,\n 'course_is_cohorted': self.enable_cohorts and self.is_course_cohorted(self.course_id),\n 'profile_display': {\n 'profile_display_job_title': self.profile_display_job_title,\n 'profile_display_organisation': self.profile_display_organisation,\n 'profile_display_work_country': self.profile_display_work_country,\n 'profile_display_email_button': self.profile_display_email_button,\n 'profile_display_bio': self.profile_display_bio,\n },\n }", "def user_data(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n '_id' : self._id\n }", "def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()", "def gen_keys():", "def _dataset_sentinel_helper(self):\n params = self.cross_experiment_key.parameters\n return dict(\n dataset_hash=params[\"train_dataset\"],\n cv_type=params[\"cross_experiment_params\"].get(\n \"cv_type\", params[\"cross_experiment_params\"].get(\"cross_validation_type\", None)\n ),\n global_random_seed=params[\"cross_experiment_params\"][\"global_random_seed\"],\n random_seeds=params[\"cross_experiment_params\"][\"random_seeds\"],\n )", "def __returnInitialParametersLocal__(self):\n return {}", "def _oauth_payload_generate(self):\n\t\tresult = {\n\t\t\t\"oauth_consumer_key\" : self.key,\n\t\t\t\"oauth_nonce\" : self._oauth_nonce_generate(),\n\t\t\t\"oauth_signature_method\" : \"HMAC-SHA1\",\n\t\t\t\"oauth_timestamp\" : str( int( time.time()) ),\n\t\t\t\"oauth_version\" : \"1.0\"\n\t\t}\n\n\t\t# * if token is unavaliable, this func must be called from request_token\n\t\t# provide callback addr instead.\n\t\t# * access token should have a higher priority ...\n\t\tif self.has_user():\n\t\t\tresult[\"oauth_token\"] = self.a_token\n\t\telse:\n\t\t\tif len( self.token ) > 0:\n\t\t\t\tresult[\"oauth_token\"] = self.token\n\t\t\telse:\n\t\t\t\tresult[\"oauth_callback\"] = self.callback\n\n\t\treturn result", "def data(self) -> dict[str, Any]:\n raise NotImplementedError()", "def __makeLoginDict(self, loginName, password, data=None):\n dict = {\n 'accountName': loginName,\n 'password': password\n }\n if data:\n dict.update(data)\n return dict", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['workingDir'] = self.workingDir\n paramDict['dataFilename'] = self.dataFilename\n 
paramDict['functionID'] = self.functionID\n paramDict['functionType'] = self.functionType\n paramDict['variableID'] = self.variableID\n paramDict['k'] = self.k\n paramDict['s'] = self.s\n return paramDict", "def createDict( self ):\n self.d = {}\n self.d['comp1'] = compensation_channel('comp1', 0, (-479.0, -10.0))\n self.d['comp2'] = compensation_channel('comp2', 1, (-479.0, -10.0))\n self.addCalibration()", "def _build_data(self):\n licence_types = [('all', 'All')] + [(lt.pk, lt.display_name) for lt in LicenceType.objects.all()]\n data = {\n 'applications': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n 'status': {\n 'values': [],\n }\n },\n 'ajax': {\n 'url': ''\n }\n },\n 'licences': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n },\n 'ajax': {\n 'url': ''\n }\n },\n 'returns': {\n 'columnDefinitions': [],\n 'filters': {\n 'licenceType': {\n 'values': licence_types,\n },\n },\n 'ajax': {\n 'url': ''\n }\n }\n }\n return data", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def set_params_data(self):\n for key in self.params:\n self.params_data[key] = {}\n self.params_data[key]['x'] = [i[0] for i in self.rand_points]\n self.params_data[key]['y'] = [i[1] for i in self.rand_points]\n self.params_data[key]['z'] = self.generate_random_data(\n min_=self.params[key]['min'],\n max_=self.params[key]['max'],\n len_=len(self.rand_points)\n )\n return self.params_data", "def _prepare_data(\n self,\n request_data: Optional[Dict[str, Any]] = None,\n ) -> Dict[str, Any]:\n if request_data is None:\n request_data = {}\n request_data['page.rows'] = self._rows_in_page\n if self._current_row:\n request_data['page.number'] = \\\n self._current_row // self._rows_in_page + 1\n else:\n # Page number starts from 0\n page_number = self._min_row // self._rows_in_page\n # But for request page number starts from 1\n request_data['page.number'] = page_number + 1\n self._current_row = self._rows_in_page * page_number\n return request_data", "def input_data_initialised(n_ops, power_system):\r\n time_zeros = np.zeros((n_ops, 1))\r\n power_zeros = np.zeros((n_ops, power_system['n_buses']))\r\n states_initial = np.zeros((n_ops, power_system['n_states']))\r\n\r\n states_results_zeros = np.zeros((n_ops, power_system['n_states']))\r\n states_t_results_zeros = np.zeros((n_ops, power_system['n_states']))\r\n data_type_zeros = np.zeros((n_ops, power_system['n_states']))\r\n\r\n data_initialised = {'time': time_zeros,\r\n 'power': power_zeros,\r\n 'states_initial': states_initial,\r\n 'states_results': states_results_zeros,\r\n 'states_t_results': states_t_results_zeros,\r\n 'data_type': data_type_zeros}\r\n\r\n return data_initialised", "def getJSONData(self):\n return {\"pubkey\": self.pubkey, \"privkey\": self.privkey}", "def data(self) -> dict:\n raise NotImplementedError()", "def init_data_for_users(db_data):\n users = db_data.get('user')\n if users is not None:\n rows = users.get('data')\n for row in rows:\n user = 
User(name=row[0], password=generate_password_hash(row[1]))\n db_add_and_commit(db, user)", "def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def _empty_data(self):\n return {\n \"info\": {\n \"root_cache_dir\": self._get_default_cache_dir(),\n \"root_downloads_dir\": self._get_default_downloads_dir(),\n },\n \"dataset\": {},\n \"category\": {}\n }", "async def _process_create_data(self, data: dict) -> dict:\n return self.SCHEMA(data)", "def _build_person_data(request):\n if hasattr(request, 'rollbar_person'):\n rollbar_person_prop = request.rollbar_person\n person = rollbar_person_prop() if callable(rollbar_person_prop) else rollbar_person_prop\n if person and isinstance(person, dict):\n return person\n else:\n return None\n\n if StarletteRequest:\n from rollbar.contrib.starlette.requests import hasuser\n else:\n def hasuser(request): return True\n\n if hasuser(request) and hasattr(request, 'user'):\n user_prop = request.user\n user = user_prop() if callable(user_prop) else user_prop\n if not user:\n return None\n elif isinstance(user, dict):\n return user\n else:\n retval = {}\n if getattr(user, 'id', None):\n retval['id'] = str(user.id)\n elif getattr(user, 'user_id', None):\n retval['id'] = str(user.user_id)\n\n # id is required, so only include username/email if we have an id\n if retval.get('id'):\n username = getattr(user, 'username', None)\n email = getattr(user, 'email', None)\n retval.update({\n 'username': username,\n 'email': email\n })\n return retval\n\n if hasattr(request, 'user_id'):\n user_id_prop = request.user_id\n user_id = user_id_prop() if callable(user_id_prop) else user_id_prop\n if not user_id:\n return None\n return {'id': str(user_id)}", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def create_initial_templates_document() -> Dict[str, Any]:\n return {\n 'schema-version': 'v1', 'document-version': '',\n 'gateway-templates': [], 'service-templates': [],\n }", "def from_data(self, data):\r\n for field in (field for field in self.SAVE_FIELDS if field not in data):\r\n cid = debug_id(guild=self.game.guild, user=self.user, charname=data.get('name',None))\r\n wg.log.warning(f\"Character {cid} missing field {field}\")\r\n\r\n for field in data:\r\n # Expects secured data\r\n setattr(self, field, data[field])", "def _create_resource_consumption_dict():\n\n returned_resource_dict = {}\n\n # things that are quantities should start at 0.0\n for resource in resource_constants.quantity_resources:\n returned_resource_dict[resource] = 0.0\n\n for resource in resource_constants.item_resources:\n # double check 
there is no overlap...\n if resource in resource_constants.quantity_resources:\n raise InternalRepyError(\"Resource '\"+resource+\"' cannot be both quantity and item based!\")\n\n returned_resource_dict[resource] = set()\n\n # I need locks to protect races in accesses to some items...\n returned_resource_dict['fungible_locks'] = {}\n for init_resource in resource_constants.fungible_item_resources:\n returned_resource_dict['fungible_locks'][init_resource] = threading.Lock()\n\n returned_resource_dict['renewable_locks'] = {}\n for init_resource in resource_constants.renewable_resources:\n returned_resource_dict['renewable_locks'][init_resource] = threading.Lock()\n\n\n # I also need to track when the last update of a renewable resource occurred\n returned_resource_dict['renewable_update_time'] = {}\n\n # (Aside) JAC: I've thought about this and looked through the commit history.\n # I don't see any reason to initialize the renewable resources with the\n # current time (as was done before).\n for init_resource in resource_constants.renewable_resources:\n returned_resource_dict['renewable_update_time'][init_resource] = 0.0\n\n\n return returned_resource_dict", "def generate_auth_dict(self) -> Dict[str, str]:\n\n # api.exchange.bitcoin.com uses Basic Authentication https://api.exchange.bitcoin.com/#authentication\n message = self.api_key + \":\" + self.secret_key\n signature = base64.b64encode(bytes(message, \"utf8\")).decode(\"utf8\")\n\n return {\n \"signature\": signature\n }", "def populate_data():\n values = dict()\n values['name'] = input('Enter Your full name : ')\n values['email'] = input('Enter Your email : ')\n values['phone'] = input('Enter Your phone number : ')\n values['gender'] = input(\n 'Enter Your gender \"Male\" \"Female\" \"Other\" : ')\n values['dob'] = input('Enter Your Date of Birh : ')\n values['latitude'] = input('Enter Your latitude : ')\n values['longitude'] = input('Enter Your longitude : ')\n values['image'] = input('Enter Your image : ')\n values['social_media'] = input('Enter Your social_media : ')\n return values", "def generate_dict(self):\n # verify preferred timestamp exists in the structure...\n if not self._check_preferred_timestamps():\n raise SampleException(\"Preferred timestamp not in particle!\")\n\n # build response structure\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n\n return result", "def create_inputs(self):\n return {}", "def getInitialData(nsmapi):\r\n # Done 6-1-2020\r\n # TODO extract ids not using the regex?\r\n initData = {}\r\n\r\n url = f\"/healthcheck\"\r\n print(\"Running basic healthcheck\")\r\n healthcheckData = nsmapi.call(url, method=\"PUT\", message='{\"id\":[\"default\"]}')\r\n initData[\"healthcheck\"] = healthcheckData\r\n\r\n for i in range(20):\r\n print(f\".\", end=\"\", flush=True)\r\n time.sleep(.5)\r\n print(\"\")\r\n\r\n print(\"Getting initial sensor data\")\r\n url = \"/sensors\"\r\n basicData = json.dumps(nsmapi.call(url))\r\n dataType = url[1:].replace(\"/\", \"_\")\r\n initData[dataType] = []\r\n for id in re.findall(\"\\\"sensorId\\\":.*?, \\\"name\\\":.*?,\", basicData):\r\n if id[-1] == \",\":\r\n id = id[:-1]\r\n id = id.replace(\"\\\"\", \"\")\r\n id = id.replace(\": \", \":\")\r\n num, name = id.split(\",\")\r\n num = num.split(\":\")[-1]\r\n name = name.split(\":\")[-1]\r\n idName = 
f\"{num},{name}\"\r\n initData[dataType].append(idName)\r\n\r\n print(\"Getting initial domain data\")\r\n url = \"/domain\"\r\n basicData = json.dumps(nsmapi.call(url))\r\n dataType = url[1:].replace(\"/\", \"_\")\r\n initData[dataType] = []\r\n for id in re.findall(\"\\\"id\\\":.*?, \\\"name\\\":.*?,\", basicData):\r\n if id[-1] == \",\":\r\n id = id[:-1]\r\n id = id.replace(\"\\\"\", \"\")\r\n id = id.replace(\": \", \":\")\r\n num, name = id.split(\",\")\r\n num = num.split(\":\")[-1]\r\n name = name.split(\":\")[-1]\r\n idName = f\"{num},{name}\"\r\n initData[dataType].append(idName)\r\n\r\n policyURLs = [\r\n \"/domain/{domainId}/ipspolicies\",\r\n \"/domain/{domainId}/firewallpolicy\",\r\n \"/domain/{domainId}/connectionlimitingpolicies\",\r\n \"/domain/{domainId}/qospolicy\",\r\n \"/protectionoptionspolicy\",\r\n \"/domain/{domainId}/malwarepolicy\",\r\n \"/domain/{domainId}/policygroups\"\r\n ]\r\n\r\n print(\"Getting initial policy data\")\r\n initData[\"policy\"] = {}\r\n for domain in initData[\"domain\"]:\r\n domainId, domainName = domain.split(\",\")\r\n initData[\"policy\"][domainId] = {}\r\n for url in policyURLs:\r\n url = url.replace(\"{domainId}\", domainId)\r\n policyData = nsmapi.call(url)\r\n key = list(policyData.keys())[0]\r\n policyType = url.split(\"/\")[-1].replace(\"policy\", \"\").replace(\"policies\", \"\")\r\n initData[\"policy\"][domainId][policyType] = []\r\n for policy in policyData[key]:\r\n policy = json.dumps(policy)\r\n # pattern = \"\\\"([^\\\"]*?)(id|ID|iD|Id){0,1}(name){0,1}\\\": (.*?),\" - don't seem to work\r\n # extracted = re.findall(pattern, policy) - don'tens seem to works\r\n # initData[\"policy\"][domainId][policyType][\"full\"] = policy\r\n for polK, polV in json.loads(policy).items():\r\n if \"omain\" not in polK.lower():\r\n if \"name\" in polK.lower():\r\n name = polV\r\n elif \"id\" in polK.lower():\r\n id = polV\r\n initData[\"policy\"][domainId][policyType].append((id,name))\r\n\r\n print(\"Got Initial Data\")\r\n\r\n return initData", "def create_key ():" ]
[ "0.78241", "0.6145188", "0.6048201", "0.6043185", "0.6016839", "0.6012802", "0.5965179", "0.5900327", "0.58157974", "0.57789785", "0.5743514", "0.5737723", "0.5720123", "0.5668789", "0.563764", "0.5610418", "0.56098014", "0.5600891", "0.5594923", "0.5593127", "0.5536252", "0.55110127", "0.54786325", "0.54527366", "0.54316235", "0.54256165", "0.54244894", "0.5422683", "0.5421369", "0.54037344", "0.5395697", "0.53953326", "0.53867775", "0.53839487", "0.5381459", "0.5357579", "0.5345585", "0.5345409", "0.53449315", "0.53449315", "0.5340789", "0.53396964", "0.5336201", "0.5318443", "0.5317512", "0.52978307", "0.52849066", "0.528338", "0.52728736", "0.5266325", "0.5260731", "0.5256086", "0.52548146", "0.5253941", "0.52529037", "0.52464", "0.52422506", "0.5236014", "0.5220824", "0.5210711", "0.5208937", "0.5202383", "0.51909405", "0.51841563", "0.51815534", "0.5172001", "0.5160685", "0.5158879", "0.5158355", "0.51560944", "0.515147", "0.5149947", "0.5146575", "0.51442623", "0.5139034", "0.51343536", "0.51297903", "0.51288795", "0.5119207", "0.5118723", "0.510989", "0.5097343", "0.5094215", "0.50932676", "0.50811493", "0.5077604", "0.50742495", "0.50693893", "0.5064707", "0.50629437", "0.5062147", "0.5061902", "0.5056831", "0.50566727", "0.50539654", "0.5050237", "0.50481147", "0.50477874", "0.50472075" ]
0.5106261
82
Return a new (unsaved) shareditem object. Does not set any of the fields that would come from the Request object (i.e. ``user``).
def get_shared_object(self):
    if not self.is_valid():
        raise ValueError("get_shared_object may only be called on valid forms")

    new = SharedItem(
        content_type = ContentType.objects.get_for_model(self.target_object),
        object_id = force_unicode(self.target_object._get_pk_val()),
        share_date = datetime.datetime.now(),
    )

    return new
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def get_shared_object(self):\n if not self.is_valid():\n raise ValueError(\"get_shared_object may only be called on valid forms\")\n\n new = SharedItem(\n object_id = force_unicode(self.target_object._get_pk_val()),\n content_type = ContentType.objects.get_for_model(self.target_object),\n share_date = datetime.datetime.now(),\n )\n \n return new", "def fromSharedItem(cls, sharedItem):\n localpart = None\n for (localpart, domain) in userbase.getAccountNames(sharedItem.store):\n break\n if localpart is None:\n raise NoSuchShare()\n for share in sharedItem.store.query(Share,\n Share.sharedItem == sharedItem):\n break\n else:\n raise NoSuchShare()\n return cls(\n shareID=share.shareID,\n localpart=localpart, domain=domain)", "def shareItem(self, sharedItem, shareID=None, interfaces=ALL_IMPLEMENTED):\n if shareID is None:\n shareID = genShareID(sharedItem.store)\n return Share(store=self.store,\n shareID=shareID,\n sharedItem=sharedItem,\n sharedTo=self,\n sharedInterfaces=interfaces)", "def create_item(self, user: User, **kwargs) -> None:", "def copy(self):\n return Object(_default_item=self._default_item, **self._items)", "def item_shared(self, item):\n self.update_item(item)", "def save_object(self, data):\n return Item(**data)", "def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item", "def cloneItemOnly( self, parent ):\n o_item = self.__class__( parent, self.o_data )\n\n return o_item", "def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})", "def create_item(item: Item):\n coll_users = data_access.get_user_collection()\n coll_items = data_access.get_items_collection()\n\n if not item.users:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"Empty user list not allowed.\")\n\n if not item.content:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"No description / content given.\")\n\n for user_name in item.users:\n if coll_users.find_one({\"name\": user_name}) is None:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n f\"User {user_name} not exists in the user list.\")\n\n item_dict = item.dict()\n item_dict[\"item_id\"] = uuid.uuid4()\n\n tm_now = datetime.datetime.now().isoformat()\n item_dict[\"status_change_date\"] = tm_now\n\n coll_items.insert_one(item_dict)", "def create_items(sender, instance, **kwargs):\n if instance.item_id is None and instance.item is None:\n item = Item()\n if hasattr(instance, 'active'):\n item.active = getattr(instance, 'active')\n item.save()\n instance.item = item", "def add_shared_items(shared_list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(shared_list_id)\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{shared_list_id}\")", "def clone(self, userName):\n if self.store:\n obj = self.store.load_or_create({'name':self.name, 'creator':userName})\n if obj:\n return obj\n \n try:\n obj = self.create()\n for key in self._allattr([self.ID, self.CREATOR, self.CREATED_TIME, self.LAST_MODIFIED]):\n obj._setattr(key, self._getattr(key))\n 
obj.creator = userName\n return obj\n except Exception as e:\n logger.error(e.message)\n logger.error('can not clone {0}'.format(self.generic()))\n return None", "def process_item(self, item, spider):\n task = SpiderTask.objects.get(id=spider.task_id)\n dj_item = Item.objects.create(task=task, **item)\n return dj_item", "def post(self):\n data = request.json\n return UserServices(data=data).save_new_item()", "def copy(self):\n new = object.__new__(type(self))\n new.required = self.required\n new.title = self.title\n new.type = self.type\n values = self.values\n if (values is not None):\n values = (*values,)\n new.values = values\n return new", "def _create_or_update_packinglistitem(self, item_identifier, item, user, optional_attrs={}):\n try:\n packing_list_item = self.packing_list.packing_list_item_model.objects.get(\n packing_list=self.packing_list,\n item_reference=item_identifier)\n except self.packing_list.packing_list_item_model.DoesNotExist:\n try:\n optional_description = item.optional_description or ''\n except AttributeError:\n optional_description = None\n options = {\n 'requisition': item._meta.verbose_name,\n 'item_description': '{subject_identifier} ({initials}) VISIT:{visit} DOB:{dob} {optional}'.format(\n subject_identifier=item.registered_subject.subject_identifier,\n initials=item.registered_subject.initials,\n visit=item.visit_code,\n dob=item.registered_subject.dob,\n optional=optional_description,\n ),\n 'user_created': user,\n }\n options.update(**optional_attrs)\n packing_list_item = self.packing_list.packing_list_item_model.objects.create(\n packing_list=self.packing_list,\n item_reference=item_identifier,\n **options)\n return packing_list_item", "def mock_item(title='Item One', author='Author One', location='Location One'):\n\n\titem_data = {'title': title, 'author': author, 'location': location}\n\n\treturn models.new_item(item_data), title, author, location", "def create(cls):\n return BasketItem(code=str(uuid.uuid4()))", "def copy(self):\n new = object.__new__(type(self))\n new.bot = self.bot\n new.description = self.description\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = 0\n new.name = self.name\n return new", "def setup_public_reusable_item_1(self):\n\n # ensure reusable item is public\n reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n reusableitem.is_public = True\n reusableitem.save()\n\n # add a reference to this reusable item by user 2\n create_toptenlist(self, 'user_2', 2) # create a list for user 2\n reference_reusable_item(self, 'user_2', self.reusableitem_1.id, 'toptenlist_2', 0)\n\n return reusableitem", "def copy(self):\n return self.__class__(self.items, self.is_cloud)", "def process_item(self, item, spider):\n if item['id'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: {0}\".format(item))\n else:\n self.ids_seen.add(item['id'])\n\n session = Session()\n\n if 'sex' in item:\n friends = item.pop('friends')\n for friend in friends:\n try:\n session.execute(friendship.insert(), params={\"friend_a_id\": item['id'], \"friend_b_id\": friend})\n session.commit()\n except:\n session.rollback()\n continue\n item.pop('image_urls')\n pictures = item.pop('images')\n if pictures:\n item['picture'] = pictures[0]['path']\n data = User(**item)\n else:\n data = City(**item)\n\n try:\n session.add(data)\n session.commit()\n except:\n session.rollback()\n raise Exception(\n \"[ERROR]: {0} - {1}\".format(sys.exc_info()[0], sys.exc_info()[1])\n )\n finally:\n session.close()\n\n return item", "def 
create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r\n parent = get_modulestore(category).get_item(usage_key)\r\n dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)\r\n\r\n # get the metadata, display_name, and definition from the request\r\n metadata = {}\r\n data = None\r\n template_id = request.json.get('boilerplate')\r\n if template_id:\r\n clz = parent.runtime.load_block_type(category)\r\n if clz is not None:\r\n template = clz.get_template(template_id)\r\n if template is not None:\r\n metadata = template.get('metadata', {})\r\n data = template.get('data')\r\n\r\n if display_name is not None:\r\n metadata['display_name'] = display_name\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=data,\r\n metadata=metadata,\r\n system=parent.runtime,\r\n )\r\n\r\n # TODO replace w/ nicer accessor\r\n if not 'detached' in parent.runtime.load_block_type(category)._class_tags:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent.location).update_item(parent, request.user.id)\r\n\r\n return JsonResponse({\"locator\": unicode(dest_usage_key), \"courseKey\": unicode(dest_usage_key.course_key)})", "def get_or_create(cls, **kwargs):\n item = cls.query.filter_by(**kwargs).first()\n if not item:\n item = cls(**kwargs)\n db.session.add(item)\n db.session.commit()\n return item", "def create(self, request, *args, **kwargs):\n # Clean up input data\n data = self.clean_data(request.data)\n\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n\n # Record the user who created this Part object\n item = serializer.save()\n item.user = request.user\n item.system = False\n\n # quantity field cannot be explicitly adjusted here\n item.quantity = item.item.quantity\n item.save()\n\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = 
True\n\t\textra['fileitem'] = linked_item\n\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'name' : linked_item.name,\n\t\t\t'fileitem' : linked_item.file,\n\t\t}", "def post(self, item):\n\n db.session.add(item)\n\n return item", "def clone_item(item):\n i = h5Item(item.text(0))\n i.path = item.path\n i.listIndex = item.dataIndex\n i.originalIndex = item.originalIndex\n i.data = item.data\n return i", "def copy(self, keep_uid=False, new_parent=None):\n obj = self.__class__(\n parent=new_parent or self.parent,\n data=self.data,\n id=self.id if keep_uid else str(uuid.uuid1()),\n )\n if new_parent or not keep_uid:\n obj.url = obj.generate_url()\n else:\n obj.url = self.url\n return obj", "def shareItem(sharedItem, toRole=None, toName=None, shareID=None,\n interfaces=ALL_IMPLEMENTED):\n warnings.warn(\"Use Role.shareItem() instead of sharing.shareItem().\",\n PendingDeprecationWarning,\n stacklevel=2)\n if toRole is None:\n if toName is not None:\n toRole = getPrimaryRole(sharedItem.store, toName, True)\n else:\n toRole = getEveryoneRole(sharedItem.store)\n return toRole.shareItem(sharedItem, shareID, interfaces)", "def copy(self):\n new = object.__new__(type(self))\n new.approximate_online_count = self.approximate_online_count\n new.approximate_user_count = self.approximate_user_count\n new.description = self.description\n new.discovery_splash_hash = self.discovery_splash_hash\n new.discovery_splash_type = self.discovery_splash_type\n new.emojis = self.emojis.copy()\n features = self.features\n if (features is not None):\n features = (*features,)\n new.features = features\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = self.id\n new.invite_splash_hash = self.invite_splash_hash\n new.invite_splash_type = self.invite_splash_type\n new.stickers = self.stickers.copy()\n new.name = self.name\n return new", "def create_item(obj: endpoint_model):\n # should this error if exists?\n new_obj = db.save(obj)\n return new_obj", "def make_borrowing(self, request):\n item = Item.objects.get(id=request.POST['item_id'])\n if not item.can_be_borrowed():\n return self.init_and_toast(\"ERROR: The item is not available\")\n\n request_obj = Request.objects.get(id=request.POST['request_id'])\n borrowing = Borrowing(user=request_obj.user, item=item, borrowing_by=request.user)\n borrowing.save()\n request_obj.borrowing = borrowing\n request_obj.save()\n return self.init_and_toast(\"The item has been borrowed succesfully\")", "def get_or_create_from_request(self, request: HttpRequest) -> Tuple['Basket', bool]:\n if not hasattr(request, 'session'):\n request.session = {}\n try:\n session_basket_id = request.session[BASKET_ID_SESSION_KEY]\n session_basket = self.get(id=session_basket_id, owner=None)\n except (KeyError, Basket.DoesNotExist):\n session_basket = None\n\n if hasattr(request, 'user') and request.user.is_authenticated:\n try:\n basket, created = self.get_or_create(owner=request.user)\n except self.model.MultipleObjectsReturned:\n # User has multiple baskets, merge them.\n baskets = list(self.filter(owner=request.user))\n basket, created = baskets[0], False\n for other in baskets[1:]:\n basket.merge(other)\n\n if session_basket:\n # Merge session basket into user basket.\n basket.merge(session_basket)\n\n if BASKET_ID_SESSION_KEY in request.session:\n # Delete session basket id from session so that it doesn't get\n # re-fetched while user is still logged in.\n del request.session[BASKET_ID_SESSION_KEY]\n else:\n basket, created = 
session_basket or self.create(), not session_basket\n request.session[BASKET_ID_SESSION_KEY] = basket.id\n\n return basket, created", "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def _create_item(self, item_id: str, data: dict) -> Pipeline:\n return Pipeline(id=item_id, **data)", "def copy(self):\n new = object.__new__(type(self))\n new.avatar_hash = self.avatar_hash\n new.avatar_type = self.avatar_type\n new.boosts_since = self.boosts_since\n new.flags = self.flags\n new.joined_at = self.joined_at\n new.nick = self.nick\n new.pending = self.pending\n role_ids = self.role_ids\n if (role_ids is not None):\n role_ids = (*role_ids,)\n new.role_ids = role_ids\n new.timed_out_until = self.timed_out_until\n return new", "def createNewItem(request):\n newItem = ItemSerializer(data=request.data)\n if newItem.is_valid():\n newItem.save()\n return Response(newItem.data, status=status.HTTP_201_CREATED)\n\n fail = {\n \"item\" : \"item is not valid\"\n }\n return JsonResponse(fail)", "def GetNewItem(self):\n if not self.category.Value:\n cat = 'None'\n else:\n cat = self.category.Value\n \n return Entry(self.name.Value, self.username.Value, self.password.Value, \n cat, self.comments.Value)", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = True\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'text' : linked_item.text,\n\t\t}", "def create_reusable_item_1(self, toptenitem_id, **kwargs):\n item_detail_url = reverse('topTenLists:TopTenItems-detail', kwargs={'pk': toptenitem_id})\n\n response = self.client.patch(item_detail_url, kwargs, format='json')\n\n try:\n newreusableitem_id = json.loads(response.content)['reusableItem']['id']\n\n self.reusableitem_1 = ReusableItem.objects.get(pk=newreusableitem_id)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ReusableItem.objects.count(), 1)\n self.client.logout()\n\n except:\n self.client.logout()\n\n return response", "def get_object(self):\n # return Person.objects.get(user=self.request.user)\n p,c = Person.objects.get_or_create(user=self.request.user)\n return p", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = True\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'url' : linked_item.url,\n\t\t\t'priority' : item.priority.id,\n\t\t\t'delivery_notes' : linked_item.delivery_notes,\n\t\t}", "def create_sample_item(name, price, data_only):\n item_info = {\n 'name': name,\n 
'price': price\n }\n if data_only:\n return item_info\n\n else:\n item_obj = Item.objects.create(**item_info)\n return item_obj, item_info", "def _save_item(request, usage_key, data=None, children=None, metadata=None, nullout=None,\r\n grader_type=None, publish=None):\r\n store = get_modulestore(usage_key)\r\n\r\n try:\r\n existing_item = store.get_item(usage_key)\r\n except ItemNotFoundError:\r\n if usage_key.category in CREATE_IF_NOT_FOUND:\r\n # New module at this location, for pages that are not pre-created.\r\n # Used for course info handouts.\r\n store.create_and_save_xmodule(usage_key)\r\n existing_item = store.get_item(usage_key)\r\n else:\r\n raise\r\n except InvalidLocationError:\r\n log.error(\"Can't find item by location.\")\r\n return JsonResponse({\"error\": \"Can't find item by location: \" + unicode(usage_key)}, 404)\r\n\r\n old_metadata = own_metadata(existing_item)\r\n\r\n if publish:\r\n if publish == 'make_private':\r\n _xmodule_recurse(\r\n existing_item,\r\n lambda i: modulestore().unpublish(i.location),\r\n ignore_exception=ItemNotFoundError\r\n )\r\n elif publish == 'create_draft':\r\n # This recursively clones the existing item location to a draft location (the draft is\r\n # implicit, because modulestore is a Draft modulestore)\r\n _xmodule_recurse(\r\n existing_item,\r\n lambda i: modulestore().convert_to_draft(i.location),\r\n ignore_exception=DuplicateItemError\r\n )\r\n\r\n if data:\r\n # TODO Allow any scope.content fields not just \"data\" (exactly like the get below this)\r\n existing_item.data = data\r\n else:\r\n data = existing_item.get_explicitly_set_fields_by_scope(Scope.content)\r\n\r\n if children is not None:\r\n children_usage_keys = [\r\n UsageKey.from_string(child)\r\n for child\r\n in children\r\n ]\r\n existing_item.children = children_usage_keys\r\n\r\n # also commit any metadata which might have been passed along\r\n if nullout is not None or metadata is not None:\r\n # the postback is not the complete metadata, as there's system metadata which is\r\n # not presented to the end-user for editing. So let's use the original (existing_item) and\r\n # 'apply' the submitted metadata, so we don't end up deleting system metadata.\r\n if nullout is not None:\r\n for metadata_key in nullout:\r\n setattr(existing_item, metadata_key, None)\r\n\r\n # update existing metadata with submitted metadata (which can be partial)\r\n # IMPORTANT NOTE: if the client passed 'null' (None) for a piece of metadata that means 'remove it'. 
If\r\n # the intent is to make it None, use the nullout field\r\n if metadata is not None:\r\n for metadata_key, value in metadata.items():\r\n field = existing_item.fields[metadata_key]\r\n\r\n if value is None:\r\n field.delete_from(existing_item)\r\n else:\r\n try:\r\n value = field.from_json(value)\r\n except ValueError:\r\n return JsonResponse({\"error\": \"Invalid data\"}, 400)\r\n field.write_to(existing_item, value)\r\n\r\n if existing_item.category == 'video':\r\n manage_video_subtitles_save(existing_item, request.user, old_metadata, generate_translation=True)\r\n\r\n # commit to datastore\r\n store.update_item(existing_item, request.user.id)\r\n\r\n result = {\r\n 'id': unicode(usage_key),\r\n 'data': data,\r\n 'metadata': own_metadata(existing_item)\r\n }\r\n\r\n if grader_type is not None:\r\n result.update(CourseGradingModel.update_section_grader_type(existing_item, grader_type, request.user))\r\n\r\n # Make public after updating the xblock, in case the caller asked\r\n # for both an update and a publish.\r\n if publish and publish == 'make_public':\r\n def _publish(block):\r\n # This is super gross, but prevents us from publishing something that\r\n # we shouldn't. Ideally, all modulestores would have a consistant\r\n # interface for publishing. However, as of now, only the DraftMongoModulestore\r\n # does, so we have to check for the attribute explicitly.\r\n store = get_modulestore(block.location)\r\n store.publish(block.location, request.user.id)\r\n\r\n _xmodule_recurse(\r\n existing_item,\r\n _publish\r\n )\r\n\r\n # Note that children aren't being returned until we have a use case.\r\n return JsonResponse(result)", "def _mkObject(self):\n return ImmutableObject(\n store=self.store,\n hash=u'somehash',\n contentDigest=u'quux',\n content=self.store.newFilePath('foo'),\n contentType=u'application/octet-stream')", "def copy(self, **kwargs):\n instance = copy(self)\n kwargs.update({\n 'id': None,\n 'pk': None,\n })\n instance.update_from_kwargs(**kwargs)\n return instance", "def load(self, request, item, linked_item, extra):\n\t\textra['buttons_update'] = True\n\t\tchecklist_items = ChecklistItem.objects.filter(checklist = item).order_by('-order_index')\n\n\t\textra['checklist_items'] = checklist_items\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t}", "def yield_item(self, response):\n item = BrobotBotsItem()\n item.update(self.data)\n yield item", "def yield_item(self, response):\n item = BrobotBotsItem()\n item.update(self.data)\n yield item", "def _update_item(self, item, user):\n item.user_modified = user\n try:\n item.panel = item.panel\n item.item_priority = item.priority\n except AttributeError:\n pass\n item.is_packed = True\n item.save()\n return item", "def Copy(self, item):\r\n\r\n self._id = item._id\r\n self._name = item._name\r\n self._title = item._title\r\n self._isGroup = item._isGroup\r\n self._breakColumn = item._breakColumn\r\n self._rect = item._rect\r\n self._font = item._font\r\n self._textColour = item._textColour\r\n self._bitmap = item._bitmap\r\n self._description = item._description\r\n self._rowPos = item._rowPos\r\n self._colPos = item._colPos\r\n self._window = item._window", "def create(self, request, parsed_request_fields, extra_fields,\n local_site=None, *args, **kwargs):\n return self._create_or_update(request, parsed_request_fields,\n extra_fields, None, local_site)", "def get_item_data(item):\n\n return OnedriveItem(\n id=item.get('id'),\n name=item.get('name'),\n web_url=item.get('webUrl'),\n 
created_by=item.get('createdBy')\n ).__dict__", "def get_item(self, id: str, user: User) -> Optional[T]:", "def create_module_item(self, module_item, **kwargs):\n\n unrequired_types = [\"ExternalUrl\", \"Page\", \"SubHeader\"]\n\n if isinstance(module_item, dict) and \"type\" in module_item:\n # content_id is not required for unrequired_types\n if module_item[\"type\"] in unrequired_types or \"content_id\" in module_item:\n kwargs[\"module_item\"] = module_item\n else:\n raise RequiredFieldMissing(\n \"Dictionary with key 'content_id' is required.\"\n )\n else:\n raise RequiredFieldMissing(\"Dictionary with key 'type' is required.\")\n\n response = self._requester.request(\n \"POST\",\n \"courses/{}/modules/{}/items\".format(self.course_id, self.id),\n _kwargs=combine_kwargs(**kwargs),\n )\n module_item_json = response.json()\n module_item_json.update({\"course_id\": self.course_id})\n\n return ModuleItem(self._requester, module_item_json)", "def __init__(self, unique_user_ids, unique_item_ids, **kwargs):\n super().__init__(**kwargs)\n self.user_model = EntityModel(unique_user_ids)\n self.item_model = EntityModel(unique_item_ids)\n self.logger = logging.getLogger()", "def deriveItem(self, url):\n item_id = url.split('/')[-1]\n # Fetch item and area\n version = 'latest' #TODO: honour the version in the xml payload\n content_tool = getToolByName(self.context, 'content')\n item = content_tool.getRhaptosObject(item_id, version)\n area = self.context\n # We create a copy that we want to clean up later, let's track the id\n to_delete_id = area.generateUniqueId()\n area.invokeFactory(id=to_delete_id, type_name=item.portal_type)\n obj = area._getOb(to_delete_id)\n\n # item must be checked out to area before a fork is possible\n obj.setState('published')\n obj.checkout(item.objectId)\n\n # Do the fork\n forked_obj = obj.forkContent(license='', return_context=True)\n forked_obj.setState('created')\n forked_obj.setGoogleAnalyticsTrackingCode(None)\n\n # remove all roles except those of the author\n forked_obj.resetOptionalRoles()\n # should not be necessary...\n forked_obj.deleteCollaborationRequests()\n owner_id = forked_obj.Creator()\n for user_id in forked_obj.getCollaborators():\n if user_id != owner_id:\n forked_obj.removeCollaborator(user_id)\n\n # Delete temporary copy\n if to_delete_id:\n area.manage_delObjects(ids=[to_delete_id])\n return forked_obj", "def create_sample_order_item(item, quantity, data_only):\n order_item_info = {\n 'item': item.pk,\n 'quantity': quantity\n }\n if data_only:\n return order_item_info\n\n else:\n order_item_obj = OrderItem.objects.create(\n item=item,\n quantity=quantity\n )\n order_item_info[\"id\"] = order_item_obj.id\n return order_item_obj, order_item_info", "def perform_create(self, serializer):\n item = models.ProfileItem.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(profile_item=item)", "def clean_copy(self) -> \"StorableObject\":\n return StorableObject(\n id=self.id, data=self.data, tags=self.tags, description=self.description\n )", "def __init__(self, **kwargs):\n self.item = Item(**kwargs)\n self._working_dir = None", "def item_duplicate():\n return {'name':'chair',\n 'value':300}", "def __load_item(item):\n\n itm = dtf.core.item.Item()\n\n itm.name = item.name\n itm.type = item.type\n\n itm.install_name = get_item_attrib(item, \"install_name\")\n itm.local_name = None\n itm.author = get_item_attrib(item, \"author\")\n itm.about = get_item_attrib(item, \"about\")\n itm.version = get_item_attrib(item, \"version\")\n\n return itm", 
"def newRequest(self):\n return Request( )", "def getItem(self, itemID, no_html=False):\n data = self._client.Item.find(int(itemID))\n item = self.makeDict(data, no_html=no_html)\n return item", "def process_item(self, item, spider):\n\n session = self.Session()\n\n try:\n CreateOrUpdate().create_or_update(item=item, session=session)\n session.commit()\n except:\n # undo in case of errors\n session.rollback()\n raise\n finally:\n session.close()\n\n return item", "def mocked_item(mocked_session):\n test_item = mock.Mock()\n test_item.session = mocked_session\n return test_item", "def make_reusable_item_public(id):\n reusableitem = ReusableItem.objects.get(pk=id)\n reusableitem.is_public = True\n reusableitem.save()\n\n return reusableitem", "def create(self, validated_data):\n\t\tvalidated_data['user'] = self.context['request'].user\n\t\treturn super(FullBoxSerializer, self).create(validated_data)", "def ajax_user(self, user):\r\n return UserTableItem(user, self.type, self.cells, self.container_name,\r\n self.editable)", "def new(self):\n return get_data_for_new_edit(dict(request.GET))", "def get_user_noreq(self, request):\n item = Item.objects.get(id=request.POST['item_id'])\n target_user = User.objects.filter(email=request.POST['email'])\n if not target_user.exists():\n # In this case we don't want to return to the initial page\n return JsonResponse({\n 'msg': \"ERROR: The user doesn't exist\"\n })\n if not item.can_be_borrowed():\n return self.init_and_toast(\"ERROR: The item is not available\")\n\n borrowing = Borrowing(user=target_user.first(), item=item, borrowing_by=request.user)\n borrowing.save()\n return self.init_and_toast(\"The item has been borrowed succesfully\")", "def process_item(self, item, spider):\n session = self.Session()\n real = Reals(**item)\n\n try:\n session.add(real)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item", "def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result", "def get_or_create_from_request(self, request, user=None):\n request_user = None\n if hasattr(request, 'user') and request.user and request.user.is_authenticated:\n request_user = request.user\n\n if not request_user and (not user or not user.is_authenticated):\n return None\n\n session_user = user if user else request_user\n tunnistamo_session_id = request.session.get(\"tunnistamo_session_id\")\n\n tunnistamo_session = None\n if tunnistamo_session_id:\n try:\n tunnistamo_session = self.get(\n pk=tunnistamo_session_id,\n user=session_user,\n )\n except TunnistamoSession.DoesNotExist:\n pass\n\n if not tunnistamo_session:\n tunnistamo_session = self.create(\n user=session_user,\n created_at=now(),\n )\n request.session[\"tunnistamo_session_id\"] = str(tunnistamo_session.id)\n\n return tunnistamo_session", "def load(self, request, item, linked_item, extra):\n\t\tfetch_comments = Comment.objects.filter(item = item).order_by('date_time')\n\t\tgravatar_queryset(fetch_comments)\n\n\t\textra['comments'] = fetch_comments\n\t\textra['buttons_update'] = True\n\t\treturn {\n\t\t\t'subject' : item.description,\n\t\t\t'tags' : item.tags,\n\t\t\t'text' : linked_item.text,\n\t\t\t'priority' : item.priority.id,\n\t\t\t'delivery_notes' : linked_item.delivery_notes,\n\t\t}", "def new(self, *args, **kwargs):\n return flattrclient.things.Thing(session=self._session, **kwargs)", "def itemFromProxy(obj):\n return object.__getattribute__(obj, '_sharedItem')", "def 
share_sharedlist(shared_list_id):\n\n email = request.form[\"email\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(list_id)\n shared_user = User.query.filter_by(email=email).first()\n\n if not shared_user:\n flash(\"No user found. Please enter a valid email.\")\n return redirect(f\"/shared-lists/{shared_list_id}\")\n\n shared_user.shared_lists.append(to_do_list)\n flash(f\"This list has been shared with {shared_user.name}!\")\n db.session.add(shared_user)\n db.session.commit()\n\n return redirect(f\"/shared-lists/{shared_list_id}\")", "def process_item(self, item, spider):\n if item['name'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: %s\" % item)\n else:\n self.ids_seen.add(item['name'])\n return item #return the item", "def copy(self):\n r = PredictionJobRequest()\n r.__dict__.update(self.__dict__)\n\n return r", "def __init__(self, sharedItem, sharedInterfaces, shareID):\n rself = _really(self)\n rself._sharedItem = sharedItem\n rself._shareID = shareID\n rself._adapterCache = {}\n # Drop all duplicate shared interfaces.\n uniqueInterfaces = list(sharedInterfaces)\n # XXX there _MUST_ Be a better algorithm for this\n for left in sharedInterfaces:\n for right in sharedInterfaces:\n if left.extends(right) and right in uniqueInterfaces:\n uniqueInterfaces.remove(right)\n for eachInterface in uniqueInterfaces:\n if not eachInterface.providedBy(sharedItem):\n impl = eachInterface(sharedItem, None)\n if impl is not None:\n rself._adapterCache[eachInterface] = impl\n rself._sharedInterfaces = uniqueInterfaces\n # Make me look *exactly* like the item I am proxying for, at least for\n # the purposes of adaptation\n # directlyProvides(self, providedBy(sharedItem))\n directlyProvides(self, uniqueInterfaces)", "def test_new_share(self):\n \n test_user_with_checkpoint = self.create_saved_test_user_with_checkpoint()\n another_test_user_to_share = self.create_saved_test_user()\n \n data = {\"user_id\": test_user_with_checkpoint.user_obj.id,\n \"to_user_id\": another_test_user_to_share.user_obj.id,\n \"signature\": gen_signature(\"put\",\n \"share\",\n gen_api_key(test_user_with_checkpoint.user_obj.access_token, \n test_user_with_checkpoint.user_obj.id)),\n \"user_checkpoint_id\": test_user_with_checkpoint.user_checkpoint_obj.id\n }\n \n resp = self.client.put(\"/share/\", data=data)\n assert \"ok\" in resp.data\n assert not get_share_w_attr(test_user_with_checkpoint.user_obj, \n another_test_user_to_share.user_obj, \n test_user_with_checkpoint.user_checkpoint_obj) is None", "def copy(self):\n cls = type(self)\n # Create a new instance without calling __init__: parameters are\n # different depending on the class.\n new_box = cls.__new__(cls)\n # Copy attributes\n new_box.__dict__.update(self.__dict__)\n return new_box", "def copy(self) -> ItemVariant:\n return ItemVariant(\n self.pak_id,\n self.editor,\n self.vbsp_config,\n self.editor_extra.copy(),\n self.authors.copy(),\n self.tags.copy(),\n self.desc,\n self.icons.copy(),\n self.ent_count,\n self.url,\n self.all_name,\n self.all_icon,\n self.source,\n )", "def copy(self):\n return TodoList(self)", "def create(cls, user_id, song_id):\n entity = cls(\n user_id=user_id,\n song_id=song_id,\n )\n entity.put()\n\n return entity", "def get_or_add(self, *args, **kwargs):\n\n key = LazyModelObject.get_identifier(*args, **kwargs)\n try:\n return self[key]\n except KeyError:\n item = LazyModelObject(*args, **kwargs)\n if not item:\n item = None\n self[key] = 
item\n return item", "def _create_item(self, parent_location, category, display_name, **kwargs):\n return ItemFactory.create(\n parent_location=parent_location,\n category=category,\n display_name=display_name,\n publish_item=False,\n user_id=self.user.id,\n **kwargs\n )", "def add_item():\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = Item()\n # First we populate the new item.\n item.category_id = request.form['categoryId']\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n # Now let's pull its category.\n category = session.query(Category).filter_by(id=item.category_id).one()\n # And make sure they're properly linked.\n item.category = category\n session.add(item)\n session.flush()\n id = item.id\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state'], 'id': id}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def bookmark(user_id, item_id):\n Bookmark.objects.get_or_create(user=User.objects.get(pk=user_id),\n item=Item.objects.get(pk=item_id))", "def get_object(self,pk):\n # return Person.objects.get(user=self.request.user)\n p,c = Person.objects.get_or_create(user_id=pk)\n return p", "def create(self, request, *args, **kwargs):\n response = super().create(request, *args, **kwargs)\n profile = response.data\n user_name = profile.get(\"username\")\n cache.set(f\"{USER_PROFILE_PREFIX}{user_name}\", profile)\n return response", "def make_prefetched_get_item(\n item: Item,\n refetch_getter: ItemGetter = strongly_consistent_get_item,\n *,\n nicename: str = DEFAULT_ITEM_NAME,\n):\n used = False\n\n def prefetched_get_item(table, key: ItemKey) -> Item:\n nonlocal used\n\n if not used:\n used = True\n return item\n return _nicename_getter(nicename, refetch_getter)(table, key)\n\n return prefetched_get_item", "def test_new_item(self):\n\n\t\titem_id = mock_item()[0]\n\t\tself.assertEqual(item_id, 1)", "def make(self, item):\n self.name = item.get(\"name\", \"\")\n self.description = item.get(\"description\", \"\")\n self.type = item.get(\"type\", \"filler\")\n if not isinstance(self.type, str) or self.type is None:\n self.usable = NotUsable\n elif len(self.type) > 1:\n self.set_usable(self.type)\n else:\n self.usable = NotUsable" ]
[ "0.7722135", "0.64705783", "0.6047539", "0.59654415", "0.5869505", "0.57898974", "0.5785594", "0.57411844", "0.57283014", "0.5641411", "0.5534432", "0.5511725", "0.55054003", "0.5462437", "0.5369579", "0.5337859", "0.53142005", "0.5303614", "0.52669346", "0.52664846", "0.52594626", "0.52514905", "0.52438545", "0.5240882", "0.52365786", "0.5230385", "0.5223718", "0.5216448", "0.52145505", "0.52056813", "0.51915205", "0.5168248", "0.51614946", "0.5161258", "0.5152068", "0.5136816", "0.51219237", "0.5108996", "0.51069814", "0.5078682", "0.5078074", "0.5077304", "0.5071374", "0.50641984", "0.5058256", "0.50573766", "0.5053917", "0.50513124", "0.50462955", "0.50328386", "0.50276935", "0.5011451", "0.5011451", "0.5011406", "0.50032204", "0.50018775", "0.49915475", "0.49820012", "0.49748635", "0.49629998", "0.49627274", "0.49548036", "0.49484283", "0.4941461", "0.49287456", "0.4927231", "0.49113786", "0.49073476", "0.49059138", "0.49026528", "0.49000847", "0.48881707", "0.48856348", "0.48821476", "0.48610377", "0.4857339", "0.48401034", "0.48400003", "0.48369685", "0.48337382", "0.48321936", "0.48303616", "0.48221543", "0.48207814", "0.48206696", "0.48190957", "0.4816118", "0.481313", "0.48002216", "0.4798115", "0.4789069", "0.47882503", "0.47881734", "0.47814995", "0.47750884", "0.47734678", "0.47663093", "0.47639444", "0.4761921", "0.4761273" ]
0.7739693
0
Test FeathrClient() get_online_features and batch_get can get data correctly.
def test_feathr_online_store_agg_features():
    online_test_table = get_online_test_table_name("nycTaxiCITableMaven")
    test_workspace_dir = Path(__file__).parent.resolve() / "test_user_workspace"
    # os.chdir(test_workspace_dir)
    # The `feathr_runtime_location` was commented out in this config file, so feathr should use
    # Maven package as the dependency and `noop.jar` as the main file
    client: FeathrClient = basic_test_setup(os.path.join(test_workspace_dir, "feathr_config_maven.yaml"))

    location_id = TypedKey(key_column="DOLocationID",
                           key_column_type=ValueType.INT32,
                           description="location id in NYC",
                           full_name="nyc_taxi.location_id")
    feature_query = FeatureQuery(
        feature_list=["f_location_avg_fare"], key=location_id)
    settings = ObservationSettings(
        observation_path="wasbs://public@azurefeathrstorage.blob.core.windows.net/sample_data/green_tripdata_2020-04.csv",
        event_timestamp_column="lpep_dropoff_datetime",
        timestamp_format="yyyy-MM-dd HH:mm:ss")

    now = datetime.now()
    # set output folder based on different runtime
    if client.spark_runtime == 'databricks':
        output_path = ''.join(['dbfs:/feathrazure_cijob', '_', str(now.minute), '_', str(now.second), ".avro"])
    else:
        output_path = ''.join(['abfss://feathrazuretest3fs@feathrazuretest3storage.dfs.core.windows.net/demo_data/output', '_', str(now.minute), '_', str(now.second), ".avro"])

    client.get_offline_features(observation_settings=settings,
                                feature_query=feature_query,
                                output_path=output_path)

    # assuming the job can successfully run; otherwise it will throw exception
    client.wait_job_to_finish(timeout_sec=Constants.SPARK_JOB_TIMEOUT_SECONDS)

    return
    backfill_time = BackfillTime(start=datetime(2020, 5, 20), end=datetime(2020, 5, 20), step=timedelta(days=1))
    redisSink = RedisSink(table_name=online_test_table)
    settings = MaterializationSettings("TestJobName", sinks=[redisSink],
                                       feature_names=["f_location_avg_fare", "f_location_max_fare"],
                                       backfill_time=backfill_time)
    client.materialize_features(settings)
    # just assume the job is successful without validating the actual result in Redis. Might need to consolidate
    # this part with the test_feathr_online_store test case
    client.wait_job_to_finish(timeout_sec=Constants.SPARK_JOB_TIMEOUT_SECONDS)

    res = client.get_online_features(online_test_table, '265', ['f_location_avg_fare', 'f_location_max_fare'])
    # just assume there are values. We don't hard code the values for now for testing
    # the correctness of the feature generation should be guaranteed by feathr runtime.
    # ID 239 and 265 are available in the `DOLocationID` column in this file:
    # https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2020-04.csv
    # View more details on this dataset: https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page
    assert len(res) == 2
    assert res[0] != None
    assert res[1] != None

    res = client.multi_get_online_features(online_test_table, ['239', '265'], ['f_location_avg_fare', 'f_location_max_fare'])
    assert res['239'][0] != None
    assert res['239'][1] != None
    assert res['265'][0] != None
    assert res['265'][1] != None
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def test_get(self):\n simple_fields = {\n \"verbose\": False,\n \"min_core_neighbors\": self.min_core_neighbors,\n \"num_features\": 1,\n \"num_unpacked_features\": 2,\n \"num_distance_components\": 1,\n \"radius\": self.radius,\n \"num_examples\": 30,\n }\n\n for field, ans in simple_fields.items():\n self.assertEqual(self.model._get(field), ans, \"{} failed\".format(field))\n\n _list_fields = {\n \"distance\": self.distance,\n \"unpacked_features\": [\"X1[0]\", \"X1[1]\"],\n \"features\": [\"X1\"],\n }\n\n for field, ans in _list_fields.items():\n self.assertItemsEqual(\n self.model._get(field), ans, \"{} failed\".format(field)\n )\n self.assertGreaterEqual(self.model.training_time, 0)\n self.assertGreaterEqual(self.model.num_clusters, 0)\n self.assertEqual(self.model.cluster_id.num_rows(), 30)", "def test_read_feature_collection(self):\n fc = self.read_feature()\n assert len(fc.features) == 1\n feature = fc.features[0]\n self.check_feature(feature)", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def test_get(self):\n # Set up\n self.assertTrue(\n # Login into browser session as developer\n self.client.login(username=self.developer.username, password=\"password\")\n )\n # Get expected data\n objs = ServicePattern.objects.all().add_service_name()\n serializer = ServicePatternSerializer(objs, many=True)\n expected = serializer.data\n\n # Test - Get response from API\n response = self.client.get(self.feed_list_url, HTTP_HOST=self.hostname)\n actual = response.data[\"features\"]\n\n # Assert\n self.assertEqual(actual, expected[\"features\"])\n\n feature = actual[0]\n fields = [\n \"service_pattern_id\",\n \"revision\",\n \"origin\",\n \"destination\",\n \"description\",\n \"service_name\",\n ]\n for field in fields:\n self.assertIn(field, feature[\"properties\"].keys())", "def do_features_request_2(features=None):\n\n #  connect to database\n cur_db = connect_db(\"172.20.38.50\", \"mvelay\", \"user\", \"sandbox\")\n cursor = cur_db.cursor()\n\n # build whole query\n cur_query = \"\"\" 
SELECT module, sw, version FROM t_feature\n WHERE feature=\"%s\" AND supported=1;\"\"\" % (features[0])\n\n print cur_query\n cursor.execute(cur_query)\n results = cursor.fetchall()\n cursor.close()\n\n if results:\n results = results[:1000] # Limit to first 1000 results\n else:\n results = None\n\n return features[0], results", "async def getFeatures(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ConfigurationValidator.getFeatures()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getFeatures\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getFeatures\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/configuration/v1.0/feature\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)", "def test_get_run(self):\n pass", "def test_client_retrieve(self):\n pass", "def test_for_client():", "def test_get_features():\n features = (\n \"Feature Name : Capa1\\r\\n State : Enabled\\r\\n\"\n \"Feature Name : Capa2\\r\\n State : Disabled\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.get_features()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Features\"]\n )\n assert out == [\"Capa1\", \"Capa2\"]", "def test_gettem_using_get(self):\n pass", "def batch_read_feature_values(\n self,\n ) -> Callable[\n [featurestore_service.BatchReadFeatureValuesRequest],\n Awaitable[operations_pb2.Operation],\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"batch_read_feature_values\" not in self._stubs:\n self._stubs[\"batch_read_feature_values\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues\",\n request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"batch_read_feature_values\"]", "def test_get_distribution_no_feature(self):\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 200)\r\n res_json = json.loads(response.content)\r\n self.assertEqual(type(res_json['available_features']), list)\r\n\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url + u'?feature=')\r\n self.assertEqual(response.status_code, 200)\r\n res_json = json.loads(response.content)\r\n self.assertEqual(type(res_json['available_features']), list)", "def 
test_batch(self):\n pass", "def test_get(self):\n log.info(\"START INTEG TEST GET\")\n\n # Start sampling.\n self.clear_sample_data()\n self.driver.start_sampling()\n self.clear_async_data()\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity record, time record.\n log.info(\"FIRST FILE A0000002 INTEG TEST GET\")\n self.create_sample_data('valid_A0000002.DEC', \"A0000002.DEC\")\n self.assert_data(None, 'valid_A0000002.yml', \n count=3, timeout=10)\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity records twice, time record.\n log.info(\"SECOND FILE A0000004 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('valid_A0000004.DEC', \"A0000004.DEC\")\n self.assert_data(None, 'valid_A0000004.yml', \n count=5, timeout=10)\n\n # Made-up data with all flags set to True.\n # Field values may not be realistic.\n log.info(\"THIRD FILE A0000003 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('all_A0000003.DEC', \"A0000003.DEC\")\n self.assert_data(None, 'all_A0000003.yml', \n count=4, timeout=10)\n log.info(\"END INTEG TEST GET\")", "def test_available_features():\n features = (\n \"Feature Name : Capa1\\r\\n State : Enabled\\r\\n\"\n \"Feature Name : Capa2\\r\\n State : Disabled\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.available_features()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Features\"]\n )\n assert out == [\"Capa2\"]", "def test_get(self):\n pass", "def get_feature(\n self,\n ) -> Callable[[featurestore_service.GetFeatureRequest], Awaitable[feature.Feature]]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_feature\" not in self._stubs:\n self._stubs[\"get_feature\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature\",\n request_serializer=featurestore_service.GetFeatureRequest.serialize,\n response_deserializer=feature.Feature.deserialize,\n )\n return self._stubs[\"get_feature\"]", "def test2():\n\n # Internal Feature Layers\n feature_queries = []\n feature_layers = db(db.gis_layer_feature.resource == \"office\").select()\n for layer in feature_layers:\n if layer.role_required and not auth.s3_has_role(layer.role_required):\n continue\n _layer = gis.get_feature_layer(layer.module,\n layer.resource,\n layer.name,\n layer.popup_label,\n config=config,\n marker_id=layer.marker_id,\n active=layer.visible,\n polygons=layer.polygons,\n opacity=layer.opacity)\n if _layer:\n # Add a URL for downloading the GeoJSON\n # @ToDO: add to gis.get_feature_layer\n _layer[\"url\"] = \"%s.geojson\" % URL(r=request, c=layer.module, f=layer.resource)\n marker = db(db.gis_marker.id == _layer[\"marker\"]).select(db.gis_marker.image,\n db.gis_marker.height,\n db.gis_marker.width,\n limitby=(0, 1)).first()\n _layer[\"marker\"] = marker\n feature_queries.append(_layer)\n\n return dict(feature_queries=feature_queries)", "def get_features(self, request, **kwargs):\n raise NotImplementedError()", "def test_api_msa_endpoint(self):\n params = {'lender': '90000451965', 'metro': '49180'}\n url = reverse(msa)\n resp = self.client.get(url, params)\n result_dict = json.loads(resp.content)\n self.assertTrue(isinstance(result_dict, dict))\n self.assertContains(resp, 'features')", "def test_module(client: Client, 
*args) -> Tuple[str, dict, dict]:\n\n client.run_parameters_validations()\n\n for service in client.services:\n # if there are risk rules, select the first one for test\n risk_rule = client.risk_rule[0] if client.risk_rule else None\n client.build_iterator(service, client.indicator_type, risk_rule)\n client.get_batches_from_file(limit=1)\n return 'ok', {}, {}", "def test_get_client(self):\n pass", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def list_features(\n self,\n ) -> Callable[\n [featurestore_service.ListFeaturesRequest],\n Awaitable[featurestore_service.ListFeaturesResponse],\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_features\" not in self._stubs:\n self._stubs[\"list_features\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures\",\n request_serializer=featurestore_service.ListFeaturesRequest.serialize,\n response_deserializer=featurestore_service.ListFeaturesResponse.deserialize,\n )\n return self._stubs[\"list_features\"]", "def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n hbase = HbaseUtils(self._get_conn(config))\n project = config.project\n table_name = _table_id(project, table)\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n\n row_keys = [\n serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n ).hex()\n for entity_key in entity_keys\n ]\n rows = hbase.rows(table_name, row_keys=row_keys)\n\n for _, row in rows:\n res = {}\n res_ts = None\n for feature_name, feature_value in row.items():\n f_name = HbaseConstants.get_feature_from_col(feature_name)\n if requested_features is not None and f_name in requested_features:\n v = ValueProto()\n v.ParseFromString(feature_value)\n res[f_name] = v\n if f_name is HbaseConstants.EVENT_TS:\n ts = struct.unpack(\">L\", feature_value)[0]\n res_ts = datetime.fromtimestamp(ts)\n if not res:\n result.append((None, None))\n else:\n result.append((res_ts, res))\n return result", "def test_get_learners(self):\n pass", "def test_get_with_filter_factoid(mockclient_cl1):\n r = mockclient_cl1.get(TEST_URL + \"?size=100&f=F00062\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 3", "def test_get(self):\r\n resp = self.client.get_json(self.url + '/0')\r\n self.assertEqual(resp.status_code, 200)\r\n obj = json.loads(resp.content)\r\n self.assertEqual(self.starting_graders[0], obj)", "def test_api_predictors_get(self):\n pass", "def test_GET_fetcher():\n params = {\n 'key1':'value1',\n 'arg2':'value2'\n }\n\n ## test that request goes ok\n resp = wf_utils.fetch_GET_request(\n GET_ECHO_ENDPOINT,\n params=params\n )\n\n ## test that response json can be parsed\n payload = resp.json()\n\n ## test that response contains expected echo\n assert payload['args'] == params\n assert payload['headers']['user-agent'] == wf_utils.USER_AGENT", "def test_get_query_with_api_key(self):\r\n users = UserFactory.create_batch(3)\r\n app = AppFactory.create(owner=users[0], info={'total': 150})\r\n task = 
TaskFactory.create(app=app, info={'url': 'my url'})\r\n taskrun = TaskRunFactory.create(task=task, user=users[0],\r\n info={'answer': 'annakarenina'})\r\n for endpoint in self.endpoints:\r\n url = '/api/' + endpoint + '?api_key=' + users[1].api_key\r\n res = self.app.get(url)\r\n data = json.loads(res.data)\r\n\r\n if endpoint == 'app':\r\n assert len(data) == 1, data\r\n app = data[0]\r\n assert app['info']['total'] == 150, data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'task':\r\n assert len(data) == 1, data\r\n task = data[0]\r\n assert task['info']['url'] == 'my url', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'taskrun':\r\n assert len(data) == 1, data\r\n taskrun = data[0]\r\n assert taskrun['info']['answer'] == 'annakarenina', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'user':\r\n assert len(data) == 3, data\r\n user = data[0]\r\n assert user['name'] == 'user1', data\r\n assert res.mimetype == 'application/json', res", "def test_get(self):\n return self.doRequest(self.url, method=\"GET\", body=self.input)", "def GetFeatures(self):\n try:\n return self._SendRequest(HTTP_GET, \"/%s/features\" % GANETI_RAPI_VERSION,\n None, None)\n except GanetiApiError as err:\n # Older RAPI servers don't support this resource\n if err.code == HTTP_NOT_FOUND:\n return []\n\n raise", "def _run_online_test(*args, **kwargs):\n import responses # noqa: F401", "def test_on_clients(self, round, dataflag='valid'):\r\n accs, losses = [], []\r\n for c in self.clients:\r\n c.setModel(self.model)\r\n acc, loss = c.test(dataflag)\r\n accs.append(acc)\r\n losses.append(loss)\r\n return accs, losses", "def test_client_nationlity_retrieve(self):\n pass", "def test_api_predictor_events_get(self):\n pass", "def test_get_results(self):\n pass", "def test_query_train_jobs(self, client):\n params = dict(offset=0, limit=10)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('total') == SUMMARY_DIR_NUM\n assert len(result.get('train_jobs')) == min(10, SUMMARY_DIR_NUM)", "def test_can_return_all_current_features_only(self):\n returned_features = return_current_features()\n self.assertTrue(len(returned_features) > 0)\n for feature in returned_features:\n self.assertTrue(feature.is_feature)\n feature_admin_object = SuggestionAdminPage.objects.get(suggestion=feature)\n self.assertTrue(feature_admin_object.in_current_voting_cycle)\n\n all_current_features_admin = SuggestionAdminPage.objects.filter(suggestion__is_feature=True,\n in_current_voting_cycle=True)\n self.assertEqual(len(all_current_features_admin), len(returned_features))", "def get_all_features(self) :\n raise NotImplementedError", "def test_all_features_paged(\n self, mp_wfs, mp_get_schema, mp_remote_describefeaturetype,\n mp_wfs_max_features, mp_remote_wfs_paged_feature):\n s = BoringSearch()\n df = s.search(query=PropertyIsGreaterThanOrEqualTo(\n 'diepte_tot_m', '0'), return_fields=['pkey_boring'])\n assert len(df) == 20", "def test_feedback_request(get_interface_params, feedback_mapping, protocol_name):\n from sail_on_client.protocol.localinterface import LocalInterface\n\n config_directory, config_name = get_interface_params\n local_interface = LocalInterface(config_name, config_directory)\n session_id = _initialize_session(local_interface, protocol_name)\n # Post results before posting\n result_files = {}\n protocol_constant = feedback_mapping[0]\n required_files = feedback_mapping[1]\n for 
required_file in required_files:\n result_files[required_file] = os.path.join(\n os.path.dirname(__file__), f\"test_results_{protocol_name}.1.1.1234.csv\"\n )\n local_interface.post_results(\n result_files, f\"{protocol_name}.1.1.1234\", 0, session_id\n )\n # Get feedback for detection\n response = local_interface.get_feedback_request(\n [\"n01484850_18013.JPEG\", \"n01484850_24624.JPEG\"],\n protocol_constant,\n f\"{protocol_name}.1.1.1234\",\n 0,\n session_id,\n )\n expected = os.path.join(\n local_interface.result_directory,\n f\"{session_id}.{protocol_name}.1.1.1234.0_{protocol_constant}.csv\",\n )\n assert expected == response", "def test_get_cloud_resources(self):\n pass", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n app.config['TESTING'] = True\n\n # Make mock of Google Flights API call\n def _mock_flight_results(parameter):\n return functions.flight_results_from_file('seed_data/testflights.txt')\n\n functions.flight_results = _mock_flight_results\n\n # Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n db.create_all()\n example_data()", "def test_get_stats(self):\n pass", "def load_features(self, test: UserModel):\n\n # Requesting the image extraction details from lambda.\n test_image_dict = None\n for i in range(5):\n print(f\"Requesting OpenCV results {test.get_id()}, try {i}...\")\n extract_feats = requests.get(self.__LAMBDA_ENDPOINT, params={\"image_no\": str(test.get_id())})\n print(extract_feats.status_code, extract_feats.json())\n # Checking if an OK result is coming.\n if extract_feats.status_code == 200:\n # Checking if the result is received.\n if extract_feats.json()['test_image'] is None:\n # When OpenCV lambda crashes.\n if i == 0:\n break\n time.sleep(60)\n continue\n # When the result is received.\n else:\n test_image_dict = extract_feats.json()['test_image']\n break\n # When a bad Gateway or other server errors happen.\n else:\n time.sleep(60)\n\n # When a result is not received.\n if test_image_dict is None:\n print(f\"ERROR - OpenCV results {test.get_id()} didn't received\")\n test_image = None\n # When a result is received.\n else:\n print(f\"OpenCV results {test.get_id()} received\")\n test_image = TestImageBuilder() \\\n .set_rms(test_image_dict['rms']) \\\n .set_std_deviation_st_ht(test_image_dict['rms']) \\\n .set_max_between_st_ht(test_image_dict['rms']) \\\n .set_min_between_st_ht(test_image_dict['rms']) \\\n .set_mrt(test_image_dict['rms']) \\\n .set_max_ht(test_image_dict['rms']) \\\n .set_min_ht(test_image_dict['rms']) \\\n .set_std_ht(test_image_dict['rms']) \\\n .set_changes_from_negative_to_positive_between_st_ht(test_image_dict['rms']) \\\n .build()\n\n # User object is created\n self.__user = User(\n test_image=test_image,\n age=test.get_age(),\n gender=test.get_gender(),\n handedness=test.get_handedness())", "def testDataFeed(self):\n\n start_date = '2008-10-01'\n end_date = '2008-10-02'\n metrics = 'ga:visits'\n\n if not conf.options.get_value('runlive') == 'true':\n return\n conf.configure_cache(self.client, 'testDataFeed')\n\n data_query = gdata.analytics.client.DataFeedQuery({\n 'ids': conf.options.get_value('table_id'),\n 'start-date': start_date,\n 'end-date': end_date,\n 'metrics': metrics,\n 'max-results': '1'\n })\n feed = self.client.GetDataFeed(data_query)\n\n self.assertTrue(feed.entry is not None)\n self.assertEqual(feed.start_date.text, start_date)\n self.assertEqual(feed.end_date.text, end_date)\n 
self.assertEqual(feed.entry[0].GetMetric(metrics).name, metrics)", "def train_and_test(self, train_fn, test_fn):\n logging.info(\"Training..\")\n self.train(train_fn)\n logging.info(\"Testing..\")\n return self.test(test_fn)\n logging.info(\"Done!\")", "def test_can_get_feeds(self, settings):\n Entry.single_register(\n Entry.GET,\n f\"{settings.CAVL_URL}/feed\",\n body=json.dumps(\n [\n {\n \"id\": 1,\n \"publisherId\": 1,\n \"url\": \"https://www.siri-feed.com\",\n \"username\": \"12345\",\n \"password\": None,\n \"status\": \"FEED_UP\",\n \"created\": None,\n \"modified\": None,\n }\n ]\n ),\n headers={\"content-type\": \"application/json\"},\n )\n\n with Mocketizer():\n cavl_service = CAVLService()\n result = cavl_service.get_feeds()\n\n assert len(result) == 1\n assert result[0] == AVLFeed(\n id=1,\n publisher_id=1,\n url=\"https://www.siri-feed.com\",\n username=\"12345\",\n password=None,\n status=AVLFeedStatus.FEED_UP,\n )", "def test_module(client: Client) -> str:\n\n message: str = ''\n try:\n result = client.get(AHA_TYPE.FEATURES, '', '', '2020-01-01', page='1', per_page='1')\n if result:\n message = 'ok'\n except DemistoException as e:\n if 'Forbidden' in str(e) or 'Authorization' in str(e):\n message = 'Authorization Error: make sure that the API Key is setup correctly.'\n else:\n raise e\n return message", "def compute_testing_features(\n test_df, feature_engineering_pipeline, feature_config_list=None, train_df=None,\n):\n if train_df is not None and feature_config_list is not None:\n train_df_arguments = {}\n for feature_config in feature_config_list:\n feature_step_name = feature_config[0]\n if feature_step_name in FEATURES_REQUIRE_TRAINING_DATA:\n train_df_arguments[feature_step_name + \"__train_df\"] = train_df\n if len(train_df_arguments) > 0:\n feature_engineering_pipeline.set_params(**train_df_arguments)\n\n test_features = feature_engineering_pipeline.transform(test_df)\n\n return test_features", "def test_fax_inbound_automations_get(self):\n pass", "def input_fn(self,features, labels, batch_size, shuffle_num, mode):\r\n dataset = tf.data.Dataset.from_tensor_slices((features, labels))\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n dataset = dataset.shuffle(shuffle_num).batch(batch_size).repeat(self.epochs)\r\n else:\r\n dataset = dataset.batch(batch_size)\r\n iterator = dataset.make_one_shot_iterator()\r\n data, labels = iterator.get_next()\r\n return data, labels", "def test_listtem_using_get(self):\n pass", "def test_training(self):\n\t\tpass", "def test_get_token_supply_all_using_get(self):\n pass", "def test_get_api_resources(self):\n pass", "def test_module():\n try:\n bigquery_client = start_and_return_bigquery_client(demisto.params()['google_service_creds'])\n query_job = bigquery_client.query(TEST_QUERY)\n query_results = query_job.result()\n results_rows_iterator = iter(query_results)\n next(results_rows_iterator)\n demisto.results(\"ok\")\n except Exception as ex:\n return_error(\"Authentication error: credentials JSON provided is invalid.\\n Exception recieved:\"\n \"{}\".format(ex))", "def test_module():\n try:\n bigquery_client = start_and_return_bigquery_client(demisto.params()['google_service_creds'])\n query_job = bigquery_client.query(TEST_QUERY)\n query_results = query_job.result()\n results_rows_iterator = iter(query_results)\n next(results_rows_iterator)\n demisto.results(\"ok\")\n except Exception as ex:\n return_error(\"Authentication error: credentials JSON provided is invalid.\\n Exception recieved:\"\n \"{}\".format(ex))", "def 
batch_create_features(\n self,\n ) -> Callable[\n [featurestore_service.BatchCreateFeaturesRequest],\n Awaitable[operations_pb2.Operation],\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"batch_create_features\" not in self._stubs:\n self._stubs[\"batch_create_features\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures\",\n request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"batch_create_features\"]", "def test_get_scenarios(self):\n pass", "def train_client(self,fvects,no_features): \n # print train statistics. \n self.N = len(fvects)\n self.initialize_weights()\n if self.n == 2:\n print \"Binary Logistic Regression\"\n else:\n print \"Multi-class (%d classes) Logistic Regression\" % self.n\n print \"L2 regularization coefficient = %f\" % self.c\n print \"Total iterations = %d\" % self.total_iterations\n print \"Initial learning rate = %f\" % self.eta0\n print \"Total number of instances = %d\" % self.N\n self.k = 1\n self.s = 0\n RA = ROLLING_AVERAGER(self.L2_rolling, self.L2_bound)\n # Iterate over the training dataset.\n for i in range(1, self.total_iterations+1):\n print \"\\nIteration #%d\" % i,\n startTime = time.time()\n self.loss = 0\n count = 0\n for fv in fvects:\n count += 1\n eta = float(self.eta0) / (1. + (float(count)/self.N))\n self.update(fv,eta)\n self.k += 1\n endTime = time.time()\n print \"time taken (sec)=\", (endTime-startTime)\n # Show the value of the bias term.\n if self.verbose:\n for lbl in self.bias:\n print \"Bias Term %d = %f\" % (lbl,self.bias[lbl])\n (L1_norm, L2_norm, actives) = self.get_norms()\n self.active_features = actives\n print \"Active Features = %d/%d\" % (actives,no_features) \n print \"L1 norm = %f\" % L1_norm\n print \"L2 norm = %f\" % L2_norm\n if RA.add(L2_norm) == 1:\n print \"Terminating...L2 norm does not change\"\n break\n if self.verbose:\n self.display_training_error(fvects)\n if self.heldoutVects:\n self.display_heldout_error(self.heldoutVects) \n # if not in the verbose mode then print the final results.\n if not self.verbose:\n trainError = self.display_training_error(fvects)\n if self.heldoutVects:\n self.display_heldout_error(self.heldoutVects) \n pass", "def test_get_cloud(self):\n pass", "def test_get1(self):\n pass", "def test_get_hyperflex_feature_limit_external_list(self):\n pass", "def get_features_from_feature_server(url, query):\n\n logger.debug('url received: ' + url + ', query received: ' + query)\n\n features = []\n f = FeatureLayer(url = url)\n feature_set = f.query(where = query)\n for feature in feature_set:\n features.append(feature.as_dict)\n return features", "def test_get2(self):\n pass", "def test_distributed_feature_extraction():\n # set up parameters\n testcol = testcol_dist\n exp_id = 'validation1'\n\n params = {}\n\n model_params = {'func': model.mnist_tfutils,\n 'devices': ['/gpu:0', '/gpu:1']}\n\n params['model_params'] = model_params\n\n params['load_params'] = {'host': testhost,\n 'port': testport,\n 'dbname': testdbname,\n 'collname': testcol,\n 'exp_id': 'training0'}\n\n params['save_params'] = {'exp_id': exp_id,\n 'save_intermediate_freq': 1,\n 'save_to_gfs': ['features', 'more_features']}\n\n targdict = {'func': get_extraction_target,\n 'to_extract': {'features': 
'model_0/validation/valid1/hidden1/output:0',\n 'more_features': 'model_0/validation/valid1/hidden2/output:0'}}\n\n targdict.update(base.DEFAULT_LOSS_PARAMS)\n\n validation_params = {'valid1': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'targets': targdict,\n 'num_steps': 10,\n 'online_agg_func': utils.reduce_mean_dict}}\n\n params['validation_params'] = validation_params\n params['skip_check'] = True\n\n conn = pm.MongoClient(host=testhost,\n port=testport)\n conn[testdbname][testcol + '.files'].delete_many({'exp_id': exp_id})\n\n # actually run the feature extraction\n base.test_from_params(**params)\n\n # check that things are as expected.\n conn = pm.MongoClient(host=testhost,\n port=testport)\n coll = conn[testdbname][testcol + '.files']\n assert coll.find({'exp_id': exp_id}).count() == 11\n\n # ... load the containing the final \"aggregate\" result after all features have been extracted\n q = {'exp_id': exp_id, 'validation_results.valid1.intermediate_steps': {'$exists': True}}\n assert coll.find(q).count() == 1\n r = coll.find(q)[0]\n # ... check that the record is well-formed\n asserts_for_record(r, params, train=False)\n\n # ... check that the correct \"intermediate results\" (the actual features extracted) records exist\n # and are correctly referenced.\n q1 = {'exp_id': exp_id, 'validation_results.valid1.intermediate_steps': {'$exists': False}}\n ids = coll.find(q1).distinct('_id')\n assert r['validation_results']['valid1']['intermediate_steps'] == ids\n\n # ... actually load feature batch 3\n idval = r['validation_results']['valid1']['intermediate_steps'][3]\n fn = coll.find({'item_for': idval})[0]['filename']\n fs = gridfs.GridFS(coll.database, testcol)\n fh = fs.get_last_version(fn)\n saved_data = cPickle.loads(fh.read())\n fh.close()\n first_results = saved_data['validation_results']['valid1']\n assert 'features' in first_results and 'more_features' in first_results\n features = saved_data['validation_results']['valid1']['features']\n more_features = saved_data['validation_results']['valid1']['more_features']\n assert features.shape == (100, 128)\n assert features.dtype == np.float32\n assert more_features.shape == (100, 32)\n assert more_features.dtype == np.float32", "def test_layer_API(self):\n\n # Exceptions\n exclude = ['get_topN', 'get_bins',\n 'get_geotransform',\n 'get_nodata_value',\n 'get_attribute_names',\n 'get_resolution',\n 'get_geometry_type',\n 'get_geometry_name',\n 'to_vector_points',\n 'to_vector_layer']\n\n V = Vector() # Empty vector instance\n R = Raster() # Empty raster instance\n\n assert same_API(V, R, exclude=exclude)\n\n for filename in [os.path.join(TESTDATA,\n 'test_buildings.shp'),\n os.path.join(HAZDATA,\n 'Lembang_Earthquake_Scenario.asc')]:\n\n L = read_layer(filename)\n\n assert same_API(L, V, exclude=exclude)\n assert same_API(L, R, exclude=exclude)", "def test_get_basic(client):\n client.delete(\"/businesses\")\n insert_test_data(client)\n rs = client.get(\"/businesses\")\n collection = rs.json[\"result\"][\"businesses\"]\n assert len(collection) == 12", "def test_query(config):\n\n p = PostgreSQLProvider(config)\n feature_collection = p.query()\n assert feature_collection.get('type', None) == 'FeatureCollection'\n features = feature_collection.get('features', None)\n assert features is not None\n feature = features[0]\n properties = feature.get('properties', None)\n assert properties is not None\n geometry = 
feature.get('geometry', None)\n assert geometry is not None", "def test_ProductsDataViewSet_with_get_request(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/')\n\n # Checking the response\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()['count'], 1)\n self.assertEqual(response.json()['next'], None)\n self.assertEqual(response.json()['previous'], None)", "def test_all_features(self):\n to_create = ['looktest1', 'looktest2', 'looktest3']\n for f in to_create:\n Feature(f).activate()\n\n all_features = Feature.all_features()\n self.assertEqual(len(all_features), len(to_create))\n for f in to_create:\n self.assertTrue(f in all_features)", "def test_get_offers(self):\n pass", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Make mock of Google Flights API call\n def _mock_flight_results(parameter):\n return functions.flight_results_from_file('seed_data/testflights.txt')\n\n functions.flight_results = _mock_flight_results\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True\n\n # Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n db.create_all()\n example_data()", "def test_intent_classifier_get_training_samples(self):\n pass", "async def test_32() -> None:\n LOG.debug(\"Test GA4GH Discovery info endpoint\")\n async with aiohttp.ClientSession() as session:\n async with session.get(\"http://localhost:5050/service-info\") as resp:\n data = await resp.json()\n # GA4GH Discovery Service-Info is small and its length should be at least 5 (required keys), when the Beacon info is very long\n # https://github.com/ga4gh-discovery/service-info/blob/develop/service-info.yaml\n assert len(data) >= 5, \"Service info size error\" # ga4gh service-info has 5 required keys, and option to add custom keys\n assert data[\"type\"].get(\"group\"), \"Service type error\" # a new key used in beacon network\n assert data[\"type\"][\"group\"].startswith(\"org.ga4gh\"), \"Service type error\" # a new key used in beacon network\n assert resp.status == 200, \"HTTP Status code error\"", "def test_get_scenario(self):\n pass", "async def test_fetch_all_route_job_updates(client):\n params = [('access_token', 'access_token_example'),\n ('group_id', 56),\n ('sequence_id', 'sequence_id_example'),\n ('include', 'include_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/dispatch/routes/job_updates',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_25() -> None:\n LOG.debug(\"Test query for targeting three datasets, using ALL. 
(expect data shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 10,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome\", \"urn:hg:1000genome:controlled\", \"urn:hg:1000genome:registered\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 3, sys.exit(\"Should be able to retrieve data for all datasets.\")", "def test_candidates_retrieve(self):\n pass", "async def test_clips_proxy_view_success(\n hass_client_local_frigate: Any,\n) -> None:\n\n resp = await hass_client_local_frigate.get(\"/api/frigate/clips/present\")\n assert resp.status == HTTP_OK\n\n resp = await hass_client_local_frigate.get(\"/api/frigate/clips/not_present\")\n assert resp.status == HTTP_NOT_FOUND", "def test_query_train_jobs_with_wrong_offse(self, client):\n params = dict(offse=0, limit=10)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('total') == SUMMARY_DIR_NUM\n assert len(result.get('train_jobs')) == min(10, SUMMARY_DIR_NUM)", "def build_enru_custom_test(self):\n train_data_file = self.data_dir + '/' + enru_paracrawl\n eval_data_file = self.data_dir + '/' + enru_newscomm\n train_data = tf.data.experimental.CsvDataset(\n [train_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n train_data = train_data.cache() # only read once\n eval_data = tf.data.experimental.CsvDataset(\n [eval_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n\n eval_data = eval_data.skip(9000).take(10000)\n eval_data = eval_data.cache()\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n\n return train_data, eval_data", "def _get_test_feed_dict(self, batch):\n raise NotImplementedError", "def test_client_twrr_performance(self):\n pass", "def test_get(self):\n url, port = self.server.address\n\n #couple of basic GETs\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)\n\n # GETs with params\n r = self.client.get(\"http://{0}:{1}/get_with_params\".format(url, port),\n params=self.params)\n self.assertEqual(200, r.status_code)\n self.assertEqual(str(self.params), r.text)\n\n # GETs with ...?", "def test_query_train_jobs_with_lower_offset(self, client):\n params = dict(offset=-1, limit=10)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('error_code') == '50540002'", "def get_featurestore(\n self,\n ) -> Callable[\n [featurestore_service.GetFeaturestoreRequest],\n Awaitable[featurestore.Featurestore],\n ]:\n # Generate a 
\"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_featurestore\" not in self._stubs:\n self._stubs[\"get_featurestore\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore\",\n request_serializer=featurestore_service.GetFeaturestoreRequest.serialize,\n response_deserializer=featurestore.Featurestore.deserialize,\n )\n return self._stubs[\"get_featurestore\"]", "def test_get_small_and_light_fee_preview(self):\n pass", "async def test_1() -> None:\n LOG.debug(\"Test info endpoint\")\n async with aiohttp.ClientSession() as session:\n async with session.get(\"http://localhost:5050/\") as resp:\n data = await resp.json()\n if \"datasets\" in data and len(data[\"datasets\"]) > 0:\n for data_ids in data[\"datasets\"]:\n # In info endpoint we get all dataset ids be them PUBLIC, REGISTERED or CONTROLLED\n assert data_ids[\"id\"] in DATASET_IDS_LIST, \"Dataset ID Error or not in list.\"\n else:\n sys.exit(\"Info Endpoint Error!\")", "def get_features(self, ti=None, tf=None, n_jobs=1, drop_features=[], compute_only_features=[]):\n # initialise training interval\n self.drop_features = drop_features\n self.compute_only_features = compute_only_features\n self.n_jobs = n_jobs\n ti = self.ti_model if ti is None else datetimeify(ti)\n tf = self.tf_model if tf is None else datetimeify(tf)\n return self._load_data(ti, tf)", "def test_get_records(self):\n pass", "def new_features(train, gby_feat, name, is_listen_type_feature, context_features, flow_features, fillna):\n \n # count and ratio on the all train\n count = gby_feat['is_listened'].transform('count')\n train[name + '_count'] = count\n train[name + '_count_bis'] = count\n train[name + '_ratio'] = gby_feat['is_listened'].transform('mean')\n \n if context_features:\n # Count and ratio for context observations\n count = gby_feat['is_context'].transform('sum')\n train[name + '_context_count'] = count\n train[name + '_context_count_bis'] = count\n train[name + '_context_ratio'] = gby_feat['is_listened_context'].transform('sum')/(1.*count)\n # Note that there should be NaN values if count=0.\n if fillna:\n train[name + '_context_ratio'].fillna(0.5, inplace=True)\n \n # Count and ration fot the flow observations\n if is_listen_type_feature:\n if flow_features:\n count = gby_feat['listen_type'].transform('sum')\n train[name + '_flow_count'] = count\n train[name + '_flow_count_bis'] = count\n train[name + '_flow_ratio'] = gby_feat['is_listened_flow'].transform('sum')/(1.*count)\n if fillna:\n train[name + '_flow_ratio'].fillna(0.5, inplace=True)\n \n count = gby_feat['is_context_flow'].transform('sum')\n train[name + '_context_flow_count'] = count\n train[name + '_context_flow_count_bis'] = count\n train[name + '_context_flow_ratio'] = gby_feat['is_listened_context_flow'].transform('sum')/(1.*count)\n if fillna:\n train[name + '_context_flow_ratio'].fillna(0.5, inplace=True)", "def test_online_info_get_online_list_post(self):\n body = GetOnline()\n response = self.client.open(\n '/online_info/get_online_list',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_empty_datasets(self):\n endpoints = [\"elections\", \"races\", \"candidates\", \"votes\", \"types\"]\n for endpoint in endpoints:\n response = self.client.get(\"/api/{}\".format(endpoint),\n 
headers=[(\"Accept\", \"application/json\")])\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data, [])", "async def test_fgsm(): \n #fgsm algo option:\n r = {}\n async with AsyncClient(app=app, base_url=\"http://test\") as ac:\n \n ALGO_NAME = AlteritAlgoName.fgsm_algo\n ffile = {'input_image': open(TEST_IMAGE_PATH, 'rb'),\n \"input_image_path\": TEST_IMAGE_PATH,\n \"alter_parameters\":json.dumps({\"acall\":True,\n \"epsilon\":0.01})\n }\n \n for epsilon_, result_ in zip([0.01, 0.1], ['saluki', 'weimaranner',]):\n r = await ac.post(f'/alterit/?algo_name={ALGO_NAME}', files = ffile)\n assert r.status_code == 200\n i1, i2, i3, i4 = await a_from_zip_stream_to_att_data(r)\n assert i1['0'][1] == result_ \n \n # async for i in mygen(5):\n # print(f'step {i}:')\n # #ALGO_NAME = AlteritAlgoName.fgsm_algo\n # ffile = {'input_image': open(TEST_IMAGE_PATH, 'rb'),\n # \"input_image_path\": TEST_IMAGE_PATH,\n # \"alter_parameters\":json.dumps({\"acall\":True,\n # \"epsilon\":0.01}),}\n # r[i] = await ac.post(f'/alterit/?algo_name={ALGO_NAME}', files = ffile)\n # assert r[i].status_code == 200\n # i1, i2, i3, i4 = await a_from_zip_stream_to_att_data(r[i])\n # assert i1['0'][1] == \"saluki\"", "async def test_recordings_proxy_view_success(hass_client_local_frigate: Any) -> None:\n\n resp = await hass_client_local_frigate.get(\"/api/frigate/recordings/present\")\n assert resp.status == HTTP_OK\n\n resp = await hass_client_local_frigate.get(\"/api/frigate/recordings/not_present\")\n assert resp.status == HTTP_NOT_FOUND" ]
[ "0.6168632", "0.60641015", "0.60555446", "0.6000268", "0.59343565", "0.59123015", "0.58149856", "0.5790768", "0.5763791", "0.5751622", "0.5747959", "0.5700878", "0.56979954", "0.5681993", "0.566791", "0.5665153", "0.56642336", "0.56637", "0.56631935", "0.5621099", "0.5589105", "0.5572067", "0.55581313", "0.5554992", "0.5538371", "0.5524045", "0.5492497", "0.54899895", "0.54743785", "0.5472374", "0.54529786", "0.5451781", "0.54383063", "0.54278684", "0.542024", "0.541605", "0.5415081", "0.54128414", "0.5407692", "0.5396116", "0.5371347", "0.53639287", "0.53478825", "0.5340362", "0.5339655", "0.5334878", "0.5329345", "0.5326289", "0.5317898", "0.53178173", "0.5316157", "0.5308811", "0.5304848", "0.53037745", "0.5301673", "0.5298396", "0.5291021", "0.52797675", "0.52783585", "0.5277361", "0.5277361", "0.52745366", "0.52690876", "0.526801", "0.5267826", "0.52630484", "0.5261895", "0.52606195", "0.52491564", "0.5249142", "0.52486867", "0.52404547", "0.5238221", "0.5237447", "0.5236789", "0.52247", "0.52234083", "0.5210685", "0.52070874", "0.52059996", "0.5190732", "0.51900864", "0.5189401", "0.51834786", "0.51825744", "0.5179887", "0.5168622", "0.5164535", "0.51570886", "0.51569647", "0.5154207", "0.51503533", "0.5146819", "0.5140451", "0.5137627", "0.5127701", "0.5126574", "0.5125845", "0.5124793", "0.5122639" ]
0.66819775
0
Initializes the object to have a pronunciation dictionary available
def __init__(self):
    self._pronunciations = nltk.corpus.cmudict.dict()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()\n \"\"\"\n API Documentation for CMU dictionary corpus\n http://www.nltk.org/api/nltk.corpus.reader.html#module-nltk.corpus.reader.cmudict\n \"\"\"", "def __init__(self):\n super().__init__()\n self.mu = 0.0\n self.type = 'Poisson'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def __init__(self, reaction_info: ParticleReactionKinematicsInfo):\n self._reaction_info = reaction_info\n self._registered_inv_masses: Dict[Tuple, str] = dict()\n self._registered_subsystems: Dict[SubSystem, Tuple[str, str]] = dict()", "def __init__(self):\n self.dictionary = None", "def __init__ ( self , phenotypes ) :\n\t\tfor k , v in phenotypes.items():\n\t\t\tassert type( k ) is str , 'phenotype keys must be strings'\n\t\t\tassert v[1] > v[0] , 'upper bound of ' + k + ' must be greater than the lower bound'\n\t\t\tassert type( v[1] ) is int and type( v[0] ) is int, ' (!) recent change means bounds need to be in ints now: https://github.com/zafarali/metastasis/issues/17'\n\n\t\tself.phenotypes = phenotypes", "def _init(self):\n pass", "def __init__(self):\n super(sppasSymbolSettings, self).__init__()\n\n self.__dict__ = dict(\n unk=\"<UNK>\",\n phone=sppasSymbolSettings.__phone_symbols(),\n ortho=sppasSymbolSettings.__ortho_symbols(),\n all=sppasSymbolSettings.__all_symbols()\n )", "def __initialize_nlp(self, nlp):\n nlp[\"nbQ\"] = 0\n nlp[\"nbQdot\"] = 0\n nlp[\"nbTau\"] = 0\n nlp[\"nbMuscles\"] = 0\n nlp[\"plot\"] = {}\n nlp[\"var_states\"] = {}\n nlp[\"var_controls\"] = {}\n nlp[\"CX\"] = self.CX\n nlp[\"x\"] = nlp[\"CX\"]()\n nlp[\"u\"] = nlp[\"CX\"]()\n nlp[\"J\"] = []\n nlp[\"g\"] = []\n nlp[\"g_bounds\"] = []\n nlp[\"casadi_func\"] = {}", "def __init__(self):\n self.lookup = {}", "def __init__(self, *args, **kwargs):\n dict.__init__(self, *args, **kwargs)", "def __init__(self):\n self._inst = {}", "def __init__(self,paramDict):\n self.pandeia_params = paramDict\n self.prep_and_run()", "def __init__(self, dict):\n self.dict = dict", "def __init__(self, dict):\n self.dict = dict", "def initPheromone(self):\n print '[Initializing pheromone values]'\n self.pheromoneValue = {}\n\n for token in self.postingTokens:\n self.pheromoneValue[token] = self.initialPheromone", "def __init__(self):\n self.ngramCounts = collections.defaultdict(zero_fn);\n self.continuationProb = collections.defaultdict(set_fn);\n self.total = 0;", "def __init__(self):\n self.dict = {}", "def __init__(self):\n self.dict = {}", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self):\n self.__dict__ = dict()\n self.load()", "def __init__():", "def __init__(self):\n self._ngrams = {}", "def init(self) -> None:", "def __init__(self):\n self.dic = {}", "def __init__(self):\n self.dic = {}", "def __init__(self, mapping: Mapping[str, Any]) -> None:\n self.__dict__.update(mapping)", "def 
_init(self):\n raise NotImplementedError", "def __init__(self):\n dict.__init__(self)\n self.datatype = None", "def initialize(self,inputDict):\n pass", "def __init__(self, P, I, D, dt):\n\n\t\tself._Kp = P\n\t\tself._Ki = I\n\t\tself._Kd = D\n\t\tself._dt = dt", "def __init__(self, variable, pnoun, nucleus):\n super(ProperNounExpression, self).__init__(variable, EmptyExpression(), nucleus)\n assert(pnoun in proper_nouns)\n self.pnoun = pnoun", "def __init__(self):\n self._map = {}", "def __init__ ( self , phenotypes ):\n\t\tself.counts = {}\n\t\tfor k , v in phenotypes.items():\n\t\t\tassert type( k ) is str , 'phenotype keys must be strings'\n\t\t\t\n\t\t\tself.counts[ k ] = 0\n\n\t\t\tassert v[1] > v[0] , 'upper bound of ' + k + ' must be greater than the lower bound'\n\n\t\tself.phenotypes = phenotypes", "def __init__(self):\n dict = defaultdict(list)\n self.conversion_dict = dict", "def __init__(self):\n self.function_dict = {\n \"Sphere\": self.draw_sphere,\n \"BSpline\": self.draw_nurbspatch,\n \"Cylinder\": self.draw_cylinder,\n \"Cone\": self.draw_cone,\n \"Torus\": self.draw_torus,\n \"Plane\": self.draw_plane,\n }", "def __init__(self):\n self.donors = {}", "def __init__(self, clean_name, type_key, attr_key, top_key = \"none\"):\n self._clean_name = clean_name\n self._type_key = type_key\n self._attr_key = attr_key\n self._top_key = top_key\n\n #add our defaults into the dictionary so that we don't change them in the syslog file\n self._insts = {}\n self._num_defaults = 0\n self._insts[\"default\"] = \"default\"\n self._insts[\"any\"] = \"any\"\n self._insts[\"all\"] = \"all\"\n self._num_defaults = len(self._insts)", "def __init__(self, *, base=None):\n self._base = base\n\n self._map = {}", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def __init__(self) :\n self.probabilities_ = None", "def __init__(self) :\n self.probabilities_ = None", "def __init__(self) :\n self.probabilities_ = None", "def init(self) -> None:\n ...", "def __init__(self):\n self.functions = {}", "def __init__(self):\n self.dictword={}", "def initialize(self, **kwargs):", "def __init__(self):\n super(sppasAnnotationsSettings, self).__init__()\n self.__dict__ = dict(\n error=-1,\n ok=0,\n warning=1,\n ignore=2,\n info=3,\n\n extension=\".xra\",\n\n # all the types of the annotations implemented into SPPAS\n types=(\"STANDALONE\", \"SPEAKER\", \"INTERACTION\"),\n\n # standard iso639-3 code for an undetermined language.\n UNDETERMINED=\"und\"\n\n )", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self, donors=None):\n if donors is None:\n self.donor_inf = {}\n else:\n self.donor_inf = {name: donations}", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def _init(self):", "def __init__(self):\n self.aeropuertos = {}", "def __init__(self, *args, **kwargs):\r\n Grammar.__init__(self)\r\n dict.__init__(self, *args, **kwargs)", "def initialize(self):\n\t\tpass", "def __init__(self, pg):\n self._paths = []\n for key, value in pg.items():\n if key == \"paths\":\n for path in pg[\"paths\"]:\n self._paths.append(DMMP_path(path))\n else:\n setattr(self, \"_%s\" % key, value)", "def __init__(self):\n self.__dict__.update(\n itertools.starmap(\n lambda key, value: (\n key[0].lower() + # upper case the first letter and add\n key.title() # title case all text\n .replace('_', '') # remove undersore\n [1:] # all 
text without the first char\n , value\n ) #lambda\n ,os.environ.items()\n ) #itertools.starmap\n ) #update", "def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()", "def __init__(self, dictionary):\n self.d = {}\n for word in dictionary:\n abbr = self.getAbbr(word)\n if abbr in self.d:\n self.d[abbr] += word,\n else:\n self.d[abbr] = [word]", "def __init__(self):\n self.key_to_record = {}\n self.mutation_to_key = {}\n self._innovation_key_generator = count(0)", "def __init__(self):\n for item in grammar:\n item['matches_compiled'] = {}\n for name,pattern in item['matches'].items():\n item['matches_compiled'][name] = \\\n re.compile(pattern, re.IGNORECASE)\n\n item['semantics_compiled'] = {}\n for name,pattern in item['semantics'].items():\n item['semantics_compiled'][name] = \\\n re.compile(pattern)\n\n if constants.SPELLCHECK:\n self.didyoumean = DidYouMean('en-us', constants.DICT_DIR)", "def __init__(self, dict = {}):\r\n if dict == {}:\r\n self.zero_val()\r\n else:\r\n self.piDD = dict\r\n self.top_node = utilities.max_length_in_list(self.return_keys())\r\n if self.piDD[self.top_node] == None:\r\n self.dim = 0\r\n else:\r\n self.dim = self.piDD[self.top_node][0][0]", "def __init__(self, chars=False):\n self.chars = chars\n self.lm_dict = defaultdict(lambda: defaultdict(float))", "def __init__(self, name=''):\n self.domain = (0, np.pi)\n self._evaluator_overrides = None\n self._name = name\n self._user_data = dict()\n self.__loaded_from = None", "def __init__(self, snps, phenotype):\n self.snps = snps\n self.phenotype = phenotype", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def __init__(self):\r\n self.dic={}", "def __init__(self):\n self.map = {}", "def __init__(self, func):\n self.dictionary = {}\n self.func = func", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def initialise(self):", "def __init__(self):\n self.class_symbol_table = dict()\n self.subroutine_symbol_table = dict()\n self.counter = {VAR: 0, ARG: 0, FIELD: 0, STATIC: 0}", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Bernoulli'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self):\n _hypre.HypreIdentity_swiginit(self, _hypre.new_HypreIdentity())", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Geometric'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def init(self, **kwargs):\n self._d = {}\n self._th = None\n self._run = True\n self.load()", "def _initialize(self, **kwargs):\n return None", "def init():", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def initialize(cls):", "def __init__(self):\n self.origin = {}" ]
[ "0.7030081", "0.62855494", "0.62579364", "0.6253256", "0.6249114", "0.61850667", "0.6163011", "0.6113119", "0.6111343", "0.6096763", "0.6093766", "0.60932755", "0.60580236", "0.60405856", "0.60405856", "0.60081416", "0.60046226", "0.59790784", "0.59790784", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59735584", "0.59675187", "0.59673834", "0.5965411", "0.59543717", "0.5953881", "0.5953881", "0.59423065", "0.5941189", "0.59311414", "0.5927725", "0.592081", "0.5919144", "0.5918009", "0.5901689", "0.5900801", "0.58942956", "0.58852035", "0.5885165", "0.5875862", "0.5856611", "0.5856611", "0.5856611", "0.58463234", "0.58463234", "0.58463234", "0.5843579", "0.5839793", "0.5832226", "0.582308", "0.58210206", "0.58203846", "0.58203846", "0.58203846", "0.58203846", "0.5819483", "0.5817034", "0.5817034", "0.580595", "0.58026457", "0.5801717", "0.57890046", "0.57830834", "0.57830495", "0.57805485", "0.5775809", "0.57750547", "0.5772174", "0.57692987", "0.5767853", "0.57540876", "0.57512957", "0.5750313", "0.5750313", "0.5750313", "0.5750313", "0.5750313", "0.57450104", "0.5744728", "0.573731", "0.57365495", "0.5734674", "0.5732217", "0.57292867", "0.5728841", "0.571434", "0.57138515", "0.5709782", "0.57073337", "0.5706052", "0.5706052", "0.5700947", "0.5691591" ]
0.75347185
1
Returns the number of syllables in a word. If there's more than one pronunciation, take the shorter one. If there is no entry in the dictionary, return 1.
def num_syllables(self, word):
    # TODO: provide an implementation!
    word = word.lower()
    D = self._pronunciations
    #D = nltk.corpus.cmudict.dict()
    if(word not in D.keys()):
        #print word not in CMUDictionary
        return 1
    #count stores no of syllables for each pronunciation of the word
    count = []
    #for each pronunciation
    for x in D[word]:
        n = 0
        #for each syllable
        for y in x:
            #if vowel sound
            if y[-1].isdigit():
                n = n + 1
        count.append(n)
    # return the pronunciation having least syllables
    return min(count)
    #return min([len([y for y in x if y[-1].isdigit()]) for x in D[word.lower()]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_of_syllables(self, word):\n\n if word.lower() in self.cmu_dict:\n return len([phoneme for phoneme in self.cmu_dict[word.lower()][0]\n if phoneme[-1].isdigit()])\n # If word is unknown, assume 1 syllable/3 letters (average for English)\n else:\n return len(word)//3", "def num_syllables(self, word):\n\n return 1", "def num_syllables(self, word):\n \"\"\"\n using the logic of vowel counting, count all vowels in the pronunciations\n \"\"\"\n dictionary = self._pronunciations;\n # check if word is present in the CMU dictionary\n if word in dictionary :\n word_pronunciations = dictionary[word.lower()]\n else :\n return 1\n \n vowels = ['A', 'E', 'I', 'O', 'U']\n \n ## find the shorter pronunciation for word\n shorter_arr = [];\n for pronunciation in word_pronunciations :\n if len(pronunciation) > len(shorter_arr) : shorter_arr = pronunciation\n \n num_length = 0\n \n for phoneme in shorter_arr :\n if phoneme[:1] in vowels : num_length += 1\n \n return num_length", "def syllable_counter(word):\n letters = [c for c in list(word.lower()) if c.isalpha()]\n\n if len(letters) == 0:\n return 0\n\n if len(letters) in [1, 2]:\n return 1\n\n num_syllables = 0\n last_syllable_pos = 0\n for i, letter in enumerate(letters):\n if letter not in VOWELS:\n if i and letters[i - 1] in VOWELS:\n num_syllables += 1\n last_syllable_pos = i\n syllable = ''\n elif i == len(letters) - 1:\n if letter != 'e':\n num_syllables += 1\n elif i - last_syllable_pos >= 2:\n num_syllables += 1\n\n return num_syllables or 1", "def countsyllables_en(word):\r\n\tif not word:\r\n\t\treturn 0\r\n\r\n\t# Remove final silent 'e'\r\n\tif word[-1] == \"e\":\r\n\t\tword = word[:-1]\r\n\r\n\t# Check for a cached syllable count\r\n\tif word in fallback_cache:\r\n\t\treturn fallback_cache[word]\r\n\r\n\t# Count vowel groups\r\n\tresult = 0\r\n\tprev_was_vowel = False\r\n\tfor char in word:\r\n\t\tis_vowel = char in VOWELS or char == 'y'\r\n\t\tif is_vowel and not prev_was_vowel:\r\n\t\t\tresult += 1\r\n\t\tprev_was_vowel = is_vowel\r\n\r\n\t# Add & subtract syllables\r\n\tfor r in fallback_addsyl:\r\n\t\tif r.search(word):\r\n\t\t\tresult += 1\r\n\tfor r in fallback_subsyl:\r\n\t\tif r.search(word):\r\n\t\t\tresult -= 1\r\n\r\n\t# Cache the syllable count\r\n\tfallback_cache[word] = result\r\n\r\n\treturn result", "def _get_num_syllables(doc: Doc, min_syllables: int = 1):\n text = (word for word in doc if not word.is_punct and \"'\" not in word.text)\n syllables_per_word = tuple(syllapy.count(word.text) for word in text)\n return sum(c for c in syllables_per_word if c >= min_syllables)", "def get_syllables(word):\n\tif word not in syllable_dict:\n\t\ttry: syllables = wordApi.getHyphenation(word)\n\t\texcept UnicodeEncodeError:\n\t\t\tsyllable_dict[word] = np.NaN\n\t\tif not syllables:\n\t\t\tsyllables = wordApi.getHyphenation(word.lower())\n\t\t\tif not syllables:\n\t\t\t\tsyllables = wordApi.getHyphenation(word.capitalize())\n\t\t\t\tif not syllables:\n\t\t\t\t\tsyllable_dict[word] = np.NaN\n\t\t\t\t\treturn syllable_dict[word]\n\t\tsyllable_dict[word] = len(syllables)\n\treturn syllable_dict[word]", "def count_syllables(words):\n\n\n count = 0\n\n for word in words:\n word_count = count_syllables_in_word(word)\n count = count + word_count\n return count", "def estimate(word):\n parts = re.split(r'[^aeiouy]+', word)\n valid_parts = []\n\n for part in parts:\n if part != '':\n valid_parts.append(part)\n\n syllables = 0\n\n for p in re_subsyllables:\n if p.match(word):\n syllables -= 1\n\n for p in re_addsyllables:\n if 
p.match(word):\n syllables += 1\n\n syllables += len(valid_parts)\n\n if syllables <= 0:\n syllables = 1\n\n return syllables", "def word_syllables(word):\n\n count = 0\n endings = '!@#$%^&*()_+[]{}:;,.eE\"'+\"'\"\n\n while word[-1] in endings:\n word = word[: -1]\n\n if len(word) <= 3:\n return 1\n\n vows = 'aeiouAEIOU'\n prev_char_vow = False\n for char in word:\n if char in vows:\n if not prev_char_vow:\n count = count + 1\n prev_char_vow = True\n else:\n prev_char_vow = False\n\n if word[-1] in 'Yy':\n count = count + 1\n\n return count", "def syllable_count(word):\n # Count the vowels in the word\n # Subtract one vowel from every dipthong\n count = len(re.findall(r'([aeiouyAEIOUY]+)', word))\n # Subtract any silent vowels\n if len(word) > 2:\n if word[-1] == 'e' and \\\n not is_vowel(word[-2]) and \\\n is_vowel(word[-3]):\n count = count - 1\n return count", "def count_syllables_in_word(word):\n\n count = 0\n\n endings = '!,;.?:'\n last_char = word[-1]\n\n if last_char in endings:\n processed_word = word[0:-1]\n else:\n processed_word = word\n\n\n if len(processed_word) <= 3:\n return 1\n if processed_word[-1] in 'Ee':\n processed_word = processed_word[0:-1]\n\n vowels = 'aeiouAEIOU'\n prev_char_was_vowel = False\n\n for char in processed_word:\n if char in vowels:\n if not prev_char_was_vowel:\n count += 1\n prev_char_was_vowel = True\n\n else:\n prev_char_was_vowel = False\n\n if processed_word[-1] in 'yY':\n count += 1\n \n\n return count", "def update_syllable_count(word, syll_count):\n\n syllables = word.split('-')\n for i in range(1, 4):\n for j in range(len(syllables) - i + 1):\n gram = '-'.join(syllables[j: j + i])\n count = syll_count.setdefault(gram, 0)\n syll_count[gram] = count + 1", "def total_syllables(target_text):\n\n splited_text = target_text.split()\n count = 0\n for word in splited_text:\n count = count + word_syllables(word)\n return count", "def n_polysyllable_words(\n doc_or_tokens: types.DocOrTokens,\n *,\n lang: Optional[str] = None,\n min_n_syllables: int = 3,\n) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, Doc):\n nspw = n_syllables_per_word(doc_or_tokens, lang=lang)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n nspw = n_syllables_per_word(tuple(words), lang=lang)\n return itertoolz.count(ns for ns in nspw if ns >= min_n_syllables)", "def count_syllables(text):\n\n import re\n\n # Make a list of vowel sounds presenting in the text (converted to lower-case letters)\n syllable_list = re.findall(r'[aiouy]+e*|e(?!d\\b|ly)[aiouye]?|[td]ed|le\\b', text.lower())\n # Find the size of the list\n count = len(syllable_list)\n\n return count", "def count_syllables(book):\n d = dict(cmudict.entries())\n with open(book, 'r') as myfile:\n booky = myfile.read().lower()\n tokenized_book = nltk.word_tokenize(booky)\n\n count = 0\n for word in tokenized_book:\n count += ( nsly(word, d))\n\n return count", "def n_syllables_per_word(\n doc_or_tokens: types.DocOrTokens, *, lang: Optional[str] = None\n) -> tuple[int, ...]:\n if lang is None:\n if isinstance(doc_or_tokens, Doc):\n lang = doc_or_tokens.lang_\n else:\n raise ValueError(\n \"`lang` must be specified when computing n syllables per word \"\n \"from an iterable of tokens\"\n )\n hyphenator = utils.load_hyphenator(lang=lang)\n words = utils.get_words(doc_or_tokens)\n return 
tuple(len(hyphenator.positions(word.lower_)) + 1 for word in words)", "def n_monosyllable_words(\n doc_or_tokens: types.DocOrTokens, *, lang: Optional[str] = None\n) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, Doc):\n nspw = n_syllables_per_word(doc_or_tokens, lang=lang)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n nspw = n_syllables_per_word(tuple(words), lang=lang)\n return itertoolz.count(ns for ns in nspw if ns == 1)", "def n_syllables(doc_or_tokens: types.DocOrTokens, *, lang: Optional[str] = None) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, Doc):\n nspw = n_syllables_per_word(doc_or_tokens, lang=lang)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n nspw = n_syllables_per_word(tuple(words), lang=lang)\n return sum(nspw)", "def count_syllables(word):\n vowels = \"aeiouy\"\n count = 0\n last_was_vowel = False\n for letter in word:\n found_vowel = False\n for v in vowels:\n if v == letter:\n if not last_was_vowel: count += 1 # don't count diphthongs\n found_vowel = last_was_vowel = True\n break\n if not found_vowel: # If full cycle and no vowel found, set last_was_vowel to false\n last_was_vowel = False\n\n\n if len(word) > 2 and word[-2:] == \"es\" and count > 1: # Remove es - it's \"usually\" silent (?)\n count -= 1\n\n if len(word) > 4 and word[-1:] == \"e\": # remove silent e\n count -= 1\n\n if len(word) > 1 and word[-2:] == \"ee\": # adds 1 for na\n count += 1\n\n if len(word) > 1 and word[-2:] == \"na\": # adds 1 for na\n count += 1\n\n # Check for special case words\n special_case = ['eloise','i']\n if word in special_case:\n count += 1\n\n return count", "def countsyllables_nlde(word):\r\n\tresult = 0\r\n\tprev_was_vowel = word[0] in VOWELS\r\n\tfor char in word[1:]:\r\n\t\tis_vowel = char in VOWELS\r\n\t\tif prev_was_vowel and not is_vowel:\r\n\t\t\tresult += 1\r\n\t\tprev_was_vowel = is_vowel\r\n\r\n\tif (len(word) > 1 and word[0] in VOWELS\r\n\t\t\tand word.endswith('e') and not word[-2] in VOWELS):\r\n\t\tresult += 1\r\n\treturn result or 1", "def syllable_counter(string):\n\ti = 0 # index of while loop \n\tcounter = 0 # counter of syllables\n\tvowels = ['a','e','i','o','u','y','e '] # what are vowels\n\tdiphthongs = ['ee', 'ei', 'ea', 'oo', 'oi', 'oy', 'ou', 'ai', 'ie', 'ey', 'ay'] #what are diphthongs\n\tindex = 0 \n\n\twhile string[index] != ' ': # break at space\n\t\tchar = string[index] # look at each letter in string\n\t\tnext_char = string[index+1] # and the letter following\n\t\tif char.isalpha():\n\t\t\tif char in vowels: \n\t\t\t\tif (char + next_char in diphthongs): \n\t\t\t\t\tcounter = counter + 1 # count\n\t\t\t\t\tindex = index + 1 # skips second letter in diphthong\n\t\t\t\telif (char == 'e' and next_char == ' '): # assume if e at end of word, is not syllable\n\t\t\t\t\tpass # don't count\n\t\t\t\telse: \n\t\t\t\t\tcounter = counter + 1 # if it's a solitary vowel, add one to counter\n\t\tindex = index + 1\n\n\treturn counter", "def count(word):\n\n return len(word)", "def getWordScore(word: str, n: int) -> int:\n # (SCRABBLE_LETTER_VALUES[char]) rise a exception if char not in SCRABBL...\n ans = 
sum(SCRABBLE_LETTER_VALUES.get(char, 0) for char in word) * len(word)\n\n # [if False, if True] [condition] (ternary op)\n return [ans, ans + 50] [len(word) == n]", "def number_syllables(self):\n return len(self.array_form)", "def getWordCharCount(w):\r\n rus = len(re.findall(r\"[а-я]\",w))\r\n eng = len(re.findall(r\"[a-z]\",w))\r\n c = len(w) \r\n return c, rus, eng", "def count_word2(self, word):\n pass", "def count_words_and_dublicates(novel):", "def getWordScore(word, n):\n score = 0\n\n for letters in word:\n if letters in SCRABBLE_LETTER_VALUES:\n score += SCRABBLE_LETTER_VALUES[letters]\n\n if len(word) == n:\n return (score * len(word)) + 50\n else:\n return score * len(word)", "def word_freq(word, ngram_dict):\n word = word.lower()\n return ngram_dict[word] if word in ngram_dict else 0", "def getWordScore(word, n):\n score=0\n for i in range(len(word)):\n addition=SCRABBLE_LETTER_VALUES[word[i]]\n score+=addition*(len(word))\n if len(word)==n:\n score+=50\n return score", "def count(self, word):\n pass", "def syllable_dict():\n counts = dict()\n \n with open('data/Syllable_dictionary.txt') as file:\n for line in file:\n arr = line.split(' ', 1)\n if 'E' in arr[1]:\n cts = arr[1].split(' ', 1)\n counts[arr[0].strip('\\'')] = int(cts[1][0])\n counts[(arr[0].strip('\\'') + \"_\")] = int(cts[0][1])\n else:\n counts[arr[0].strip('\\'')] = int(arr[1][0])\n return counts", "def wordFreq(parseThis):\n \n freq = {}\n nono = ('\"', \"'\", '%', '$', '!', '.', '?', '-', ','\n , '\\n', '\\t', '\\r', ':', ';')\n\n for c in nono:\n parseThis = parseThis.replace(c, \" \")\n \n words = parseThis.split()\n \n for word in words:\n temp = word.lower()\n freq[temp] = freq.get(temp, 0) + 1\n\n return freq", "def compute_morpheme_length(attribs):\n num_morphemes = 0\n for m in attribs:\n if m:\n if type(m) == list:\n num_morphemes += len(m)\n else:\n num_morphemes += 1\n if num_morphemes == 0: #To be consistent with the previous functionality\n return None\n return num_morphemes", "def get_word_count(words):\n return sum(1 for word in words if word not in punctuation)", "def word_count(self):\n from collections import Counter\n counts = Counter(self._replace_non_alnum().split())\n return counts", "def get_word_width(self, word: str) -> float:\n return self.state.size * self.state.font.get_text_width(word)", "def match_pfx(uni_word, morphs):\n uni_morph = unicode(morphs[0].lex, 'UTF-8')\n if uni_word.startswith(uni_morph): # just one morpheme starts with word\n return len(uni_morph), 1\n if len(morphs) == 1: # only one morpheme is left\n morph_dec = decompose(morphs[0].lex)\n word_dec = decompose(uni_word)\n if word_dec == morph_dec:\n return 1, len(uni_word)\n else:\n return -1, -1\n for i in range(2, len(morphs)+1):\n submorphs = ''.join([morph.lex for morph in morphs[:i]])\n submorphs_dec = decompose(submorphs)\n for k in range(1, len(unicode(submorphs, 'UTF-8'))):\n word_dec = decompose(uni_word[:k])\n # logging.debug(' %s(%s:%s) <- %s(%s:%s)', uni_word[:k].encode('UTF-8'), word_dec, to_hex(word_dec),\n # submorphs, submorphs_dec, to_hex(submorphs_dec))\n if word_dec == submorphs_dec:\n return k, i\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n logging.debug('PFX: %s(%s): %s', uni_word.encode('UTF-8'), decompose(uni_word), morphs_str)\n return -1, -1", "def getWordScore(word, n):\n score = 0\n for letter in word:\n score += SCRABBLE_LETTER_VALUES[letter]\n score *= len(word)\n if len(word) == n:\n score += 50\n return score", "def word_count(s):\n # Your code here\n\n stop_char = 
r\"\"\":;\",.-+=/|[]{|}()*^\\&\"\"\"\n\n # Make sure special characters arent in string\n s_clean = \"\".join([x for x in s if x not in stop_char])\n\n # Lower case and remove trailing space\n word_list = s_clean.lower().split()\n\n # use cache to hold memory\n word_count = {}\n\n for x in word_list:\n\n if x not in word_count:\n # if not there, start it at 0\n word_count[x] = 0\n\n # if seen again, increase count\n word_count[x] += 1\n\n return word_count", "def getWordKey(word):\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return len(word), word\n # END_YOUR_ANSWER", "def num_chars(word):\n return len(word)", "def most_repeating_letter_count(word):\n return Counter(word.lower()).most_common(1)[0][1]", "def frequency(self, word):\n if word in self.keys():\n return self[word]\n else:\n return 0", "def count_word(doc):\n count = count = 0\n for w in document.split(\" \"):\n count = count + 1\n return count", "def get_counts(data):\n\n word_count = {}\n syll_count = {}\n\n infile = data.corpus\n try:\n\n open_file = codecs.open(infile, 'r', encoding='utf-16')\n for line in open_file:\n line = line.lower()\n # Remove tablet indexing info and line numbers. Grab only text data\n line = line.split(',')\n text = clean_line(line[7])\n\n # Update the occurrences of the words in the line\n for word in text.split():\n count = word_count.setdefault(word, 0)\n word_count[word] = count + 1\n\n # Track occurrences of syllables\n update_syllable_count(word, syll_count)\n\n open_file.close()\n except IOError:\n print(\"Cannot open: \" + infile)\n\n return (word_count, syll_count)", "def get_number_of_words(self):\n filename = f'{self.path}/{self.filename}'\n # word_counter = {}\n # w_cnt = 0\n # x = 0\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n # for word in word_list:\n # w_cnt += 1\n # if word not in word_counter:\n # word_counter[word] = 1\n # else:\n # word_counter[word] = word_counter[word] + 1\n\n # for word in word_list:\n # x += 1\n # print(word, word.isalpha(), x)\n\n w_cnt = sum([a[0].isalpha() for a in word_list])\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_words', w_cnt)\n print(datetime.now(), '-', 'number_of_words for', self.filename, 'calculated =', w_cnt)\n return None", "def countWord(self,phrase):\n return self._support.countWord(phrase)", "def calc_word_value(word):\n return sum([LETTER_SCORES.get(letter.upper(), 0) for letter in word])", "def get_word_score(word, n=7):\n score = 0\n\n for i in word:\n score += SCRABBLE_LETTER_VALUES[i]\n\n if len(word) == n:\n score += 50\n\n return score", "def n_chars_per_word(doc_or_tokens: types.DocOrTokens) -> tuple[int, ...]:\n words = utils.get_words(doc_or_tokens)\n return tuple(len(word) for word in words)", "def count_words(subreddit, word_list):\n word_list = [str.lower() for str in word_list]\n\n my_list = get_hot_list(subreddit)\n my_dict = {}\n\n for word in word_list:\n my_dict[word] = 0\n try:\n for title in my_list:\n title_split = title.split(\" \")\n\n for iter in title_split:\n for iter_split in word_list:\n if iter.lower() == iter_split.lower():\n my_dict[iter_split] += 1\n\n for key, val in sorted(my_dict.items(), key=lambda x: x[1],\n reverse=True):\n if val != 0:\n print(\"{}: {}\".format(key, val))\n except Exception:\n return None", "def word_count(poem):\n 
lines = [line for line in poem.split(\"\\n\") if line]\n word_map = {}\n for line in lines:\n for word in line.split(\" \"):\n if word:\n if word in word_map:\n word_map[word] += 1\n else:\n word_map[word] = 1\n return word_map", "def get_word_weight(words, word):\n weight = 0\n for w in words:\n if not any(c in word for c in w):\n weight += 1\n\n return weight", "def count_words():\n paragraph = \"a distinct section of a piece of writing,\"\n # 替换\n paragraph.replace(\",\", \" \").replace(\":\", \" \").replace(\";\", \" \").replace(\".\", \" \").replace(\"?\", \" \")\n words = paragraph.split(\" \")\n nums = {}\n\n for word in words:\n nums[word] = nums[word]+1 if word in nums else 1\n # nums[word] = nums.get(word, 0) + 1\n\n for word, num in nums.items():\n print(word, \": \", num)", "def StressGuesser(self, origword):\n numsyls = len(self.sylBounds) + 1\n if numsyls == 1: return 1\n self.sylBounds.sort() # suffixes may have been marked first\n if self.forceStress: # suffixes like 'tion', 'cious'\n return numsyls + self.forceStress\n if numsyls - self.numSuffixes == 1: # pretty reliable I think\n return 1\n isprefix = self.wd[:self.sylBounds[0]] in PREFIXES\n if numsyls - self.numSuffixes == 2: # Nessly w/ suffix twist\n if isprefix: return 2\n else: return 1\n elif isprefix and (numsyls - self.numSuffixes == 3):\n return 2\n else: # Nessley: 3+ syls, str penult if closed, else antepenult\n # syl n is origword[self.sylBounds[n-1]:self.sylBounds[n]-1]; so?\n if (origword[self.sylBounds[-1] - 1]\n not in 'aeiouy'): # last char penult\n retstress = numsyls - 1 # if closed, stress penult\n else: retstress = numsyls - 2 # else, antepenult\n if self.numSuffixes == numsyls:\n retstress -= 1\n return retstress", "def syll_over_text(data_word):\n\n step = 200\n y = []\n temp_syll = []\n\n for count, word in enumerate(data_word, 1):\n\n temp_syll.append(textstat.syllable_count(word))\n\n if count >= step:\n y.append(sum(temp_syll)/len(temp_syll))\n temp_syll = temp_syll[1:]\n\n x = range(step,len(y)+step)\n return x,y", "def word_count(phrase):\n return collections.Counter(phrase.split())", "def _get_num_long_words(doc: Doc, min_characters=7):\n\n filtered_words = [\n word\n for word in doc\n if not word.is_punct\n and \"'\" not in word.text\n and len(word.text.strip()) >= min_characters\n ]\n return len(filtered_words)", "def _count_words_in_string(self, sentence):\n word_count = dict()\n for i in sentence:\n if word_count.get(i) is None:\n word_count[i] = 1\n else:\n word_count[i] = word_count.get(i)+1\n\n return word_count", "def get_max_word_length(self, word_dict):\n max_len = 0\n max_word = \"\"\n for word in word_dict:\n word = \"^\" + word + \"$\"\n if len(word) > max_len:\n max_len = len(word)\n max_word = word\n print(\"Longest word: \" + max_word + \" \" + str(max_len))\n return max_len", "def frequency_of(self, word):\n for key in self.frequencies().keys():\n if key == word:\n return self.frequencies()[key]\n return 0", "def length_uc(x):\r\n return sum(length(m) for m in metamer(x))", "def count_words_in_file(file_name):\n\n\treturn len(get_words_in_file(file_name))", "def test_number_in_word():\n assert syllapy.count(\"d0g\") == 0", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 
1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def word_count(self):\n\n # split words on default word boundaries for words list\n words = self.phrase.split() \n\n # translate removes punctuation only, normalizes to lower case\n normalized_words = [self.normalize_word(w) for w in words]\n\n # removes empty strings after stripping punctuation\n filtered_words = [w for w in normalized_words if w]\n\n # sets up default dictionary, so all entries are 0\n word_counts = collections.defaultdict(int) #{}\n\n # define word counting function for use in reduce\n def count_word(dictionary, word):\n dictionary[word] = dictionary[word] + 1\n return dictionary\n\n # count words into dictionary from word list\n reduce(count_word, filtered_words, word_counts)\n\n return word_counts", "def wordCount( aList ):\n return len( aList )", "def word_count(self, doc):\n\n return len(self.tokenize_doc_simple(doc))", "def _raw_word_count(self, job):\n return sum(len(sentence.words) for sentence in job)", "def match_sfx(uni_word, morphs):\n uni_morph = unicode(morphs[-1].lex, 'UTF-8')\n if uni_word.endswith(uni_morph): # just one morpheme ends with word\n return len(uni_morph), 1\n for i in range(-2, -(len(morphs)+1), -1):\n submorphs = ''.join([morph.lex for morph in morphs[i:]])\n submorphs_dec = decompose(submorphs)\n for k in range(-1, -len(unicode(submorphs, 'UTF-8')), -1):\n word_dec = decompose(uni_word[k:])\n # logging.debug(' %s(%s:%s) <- %s(%s:%s)', uni_word[k:].encode('UTF-8'), word_dec, to_hex(word_dec),\n # submorphs, submorphs_dec, to_hex(submorphs_dec))\n if word_dec == submorphs_dec:\n return -k, -i\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n logging.debug('SFX: %s(%s): %s', uni_word.encode('UTF-8'), decompose(uni_word), morphs_str)\n return -1, -1", "def frequency(self, word):\n if word in self:\n return self[word].tokens\n return 0", "def theLoveLetterMystery(s):\n mincount = 0\n for i in range(len(s) // 2):\n mincount += abs(ord(s[i]) - ord(s[-1 - i]))\n\n return mincount", "def n_long_words(doc_or_tokens: types.DocOrTokens, *, min_n_chars: int = 7) -> int:\n # docs are hashable, so we can leverage the lru cache as-is\n if isinstance(doc_or_tokens, Doc):\n ncpw = n_chars_per_word(doc_or_tokens)\n # otherwise, let's get an iterable of words but cast it to a hashable tuple\n # so we can leverage the lru cache on this and related calls in, say, n_long_words\n else:\n words = utils.get_words(doc_or_tokens)\n ncpw = n_chars_per_word(tuple(words))\n return itertoolz.count(nc for nc in ncpw if nc >= min_n_chars)", "def len_phoc(levels=DEFAULT_PHOC_LEVELS,\n alphabet=[Alphabet.ASCII_LOWER, Alphabet.ASCII_DIGITS, Alphabet.ASCII_PUNCTUATION]):\n return len(phoc('', alphabet=alphabet, levels=levels))", "def _label_width(text):\n width = 0\n for lineno, line in enumerate(text.split(u'\\n')):\n size = [_BIG_FONT, _SMALL_FONT][lineno > 0] # Cool idiom, huh?\n width = max(width, size * len(line))\n return width", "def len_special(self):\n return sum(self.count(term_chars) * len(term_chars) for term_chars in termcolor.values())", "def word_count(string):\n counts = dict()\n words = string.split()\n\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return len(counts)", "def forcast(self, doc):\n num_words = _get_num_words(doc)\n\n if num_words < 150:\n return 0\n\n mono_syllabic = 0\n for i in range(150):\n if syllapy.count(doc[i].text) == 1:\n mono_syllabic += 1\n return 20 - 
(mono_syllabic / 10)", "def better_solution(self, S: str, words: List[str]) -> int:\n def encode(S):\n return zip(*[(k, len(list(v))) for k, v in groupby(S)])\n\n k, v = encode(S)\n count = 0\n for word in words:\n kk, vv = encode(word)\n if k != kk:\n continue\n count += int(all(c1 >= max(c2, 3) or c1 == c2 for c1, c2 in zip(v, vv)))\n return count", "def get_number_of_document_word_occurs_in(self, word):\n return len(self.dictionary[word]['docs'])", "def frequencyLetterDic(s):\n pass", "def n_words(doc_or_tokens: types.DocOrTokens) -> int:\n words = utils.get_words(doc_or_tokens)\n return itertoolz.count(words)", "def word_count(self):\n return Counter(self._normalize(self._raw_phrase_str))", "def number_of_words_with_doubles(self):\n words = self.body.split()\n words_with_doubles_count = 0\n for word in words:\n for char in word[0:len(word) - 1]:\n if char == word[word.index(char) + 1]:\n words_with_doubles_count += 1\n break\n return words_with_doubles_count", "def freq(word, document):\n return document.split(None).count(word)", "def _WordScore(index, normalized_command_word,\n canonical_command_word, canonical_command_length):\n score = 0\n\n # The match can go either way.\n if normalized_command_word in canonical_command_word:\n shorter_word = normalized_command_word\n longer_word = canonical_command_word\n elif canonical_command_word in normalized_command_word:\n shorter_word = canonical_command_word\n longer_word = normalized_command_word\n else:\n return score\n\n # Inner match must be a word boundary.\n hit = longer_word.find(shorter_word)\n if hit > 0 and longer_word[hit-1] != '-':\n return score\n\n # Partial hit.\n score += 10\n\n # Prefer a match in less words.\n if canonical_command_length == 1:\n score += 30\n elif canonical_command_length == 2:\n score += 20\n elif canonical_command_length == 3:\n score += 10\n\n # Prefer a match in order.\n if index == 0:\n score += 25\n elif index == 1:\n score += 15\n else:\n score += 5\n\n # Prefer matching more chars and beginning of word.\n # This also handles minor suffix diffs, like singular vs. 
plural.\n extra = len(longer_word) - len(shorter_word)\n if extra <= 2:\n extra = 3 - extra\n if longer_word.startswith(shorter_word):\n extra *= 2\n score += extra\n\n # Prefer matching on surface words.\n if index == 0 and canonical_command_length > 1:\n score += 30\n # Also prefer matching on group words.\n elif index > 0 and canonical_command_length > index + 1:\n score += 15\n\n return score", "def count_words(sentence):\n\tblob = tb.TextBlob(sentence.decode('utf-8','ignore'))\n\tword_list = [w for w in blob.words if '\\'' not in w]\n\treturn len(word_list)", "def n_unique_words(doc_or_tokens: types.DocOrTokens) -> int:\n words = utils.get_words(doc_or_tokens)\n # NOTE: this stdlib solution is slower than itertoolz for docs with ~250+ words\n # so let's take a small hit on short docs for the sake of big wins on long docs\n # return len({word.lower for word in words})\n return itertoolz.count(itertoolz.unique(word.lower for word in words))", "def frequency(w: str) -> float:\n return frequency_list.get(remove_punctuation(w), 0)", "def count_word(word, titles):\n word = word.lower()\n count = 0\n for title in titles:\n if word in title.lower():\n count += 1\n return count", "def word_count(self):\n return self._word_count", "def makeWordLengths(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if len(x) not in self.wordlengths: \r\n self.wordlengths[len(x)] = 1\r\n else: \r\n self.wordlengths[len(x)] += 1\r\n return self.wordlengths", "def keyword_length(text):\n text = scrub_string(text)\n a = [fabs(IC(text, ncol) - ENGLISH_IC) for ncol in range(1, MAX_LEN)]\n return a.index(min(a)) + 1", "def count_words(word, words):\n same_words_in_message = 0\n for element in words:\n if element == word:\n same_words_in_message += 1\n return same_words_in_message", "def _get_num_words(doc: Doc):\n filtered_words = [word for word in doc if not word.is_punct and \"'\" not in word.text and not word.is_space]\n return len(filtered_words)", "def num_of_words(line, context):\n return [('num_of_word', len(line.txt.split()))]", "def word_count(text, word):\n \n #answer\n word_list = text.split(\" \")\n return (word_list.count(word))\n \n #return (text.count(word)) - deoesn't work" ]
[ "0.8187434", "0.7941615", "0.7839003", "0.77053165", "0.76928765", "0.76834965", "0.7579669", "0.7408953", "0.724696", "0.722788", "0.7227621", "0.71323586", "0.70042217", "0.69125587", "0.68785167", "0.68268627", "0.6769647", "0.67252815", "0.67212397", "0.6622493", "0.65549856", "0.65315974", "0.6458056", "0.6143281", "0.6068877", "0.5966394", "0.5941749", "0.5903695", "0.5882586", "0.58493674", "0.58196396", "0.58069265", "0.58058834", "0.5797045", "0.5772696", "0.5772325", "0.5755847", "0.5750868", "0.57437867", "0.5737898", "0.57283735", "0.5707326", "0.57032484", "0.5691758", "0.56868", "0.56823355", "0.5642579", "0.5632297", "0.5629562", "0.5628837", "0.56216156", "0.56161296", "0.5614792", "0.5602401", "0.55889195", "0.5567461", "0.556507", "0.5549596", "0.5544518", "0.5544006", "0.5535119", "0.5530532", "0.552832", "0.55096537", "0.55003273", "0.54969645", "0.5487211", "0.5483182", "0.5483147", "0.54801685", "0.5468863", "0.54657894", "0.5459463", "0.54568624", "0.54562193", "0.5456186", "0.54558605", "0.5449758", "0.5442174", "0.5441771", "0.54388624", "0.5433113", "0.54330206", "0.5415036", "0.5409182", "0.5406529", "0.5404546", "0.5401429", "0.53978586", "0.53937215", "0.5392519", "0.5392237", "0.5388957", "0.537926", "0.53786707", "0.53736377", "0.5367981", "0.53654397", "0.53570133", "0.5351345" ]
0.8625879
0
Returns True if two words (represented as lowercase strings) rhyme, False otherwise.
def rhymes(self, a, b): D = self._pronunciations a = a.lower() b = b.lower() # print "----------------------------------" # print "Rhyming ",a,b if a in D.keys() and b in D.keys(): a = D[a] #print a b = D[b] #print b #stores syllables after the first consonant sound last_syl_a = [] last_syl_b = [] # for each pronunciation of the word for y in a: syl = [] pos = 0 for i in range(0, len(y)): #if vowel if y[i][-1].isdigit(): pos = i break # append all syllables from first vowel for i in range(pos, len(y)): syl.append(y[i]) last_syl_a.append(syl) # print(last_syl_a) # for each pronunciation of the word for y in b: syl = [] pos = 0 for i in range(0, len(y)): # if vowel if y[i][-1].isdigit(): pos = i break # append all syllables after first consonant sound for i in range(pos, len(y)): syl.append(y[i]) last_syl_b.append(syl) # print(last_syl_b) if any(i in last_syl_a for i in last_syl_b): # print "Rhyming - Yes" return True else: # print "Checking if Shorter word is suffix of Longer word's pronunciation" if len(last_syl_a[0]) > len(last_syl_b[0]): big = last_syl_a small = last_syl_b else: big = last_syl_b small = last_syl_a for i in big: for j in small: count = 0 for k in range(0, len(j)): if j[-(k + 1)] == i[-(k + 1)]: count = count + 1 if count == len(j) and count > 0: # print "Rhyming - yes", i,j return True return False else: # Either or Both words not in CMU Dictionary return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def are_words_synonym(self, word1, word2):\n return self.get_intersection((word1, word2))", "def is_lexical(word_i, word_j):\n if word_i.isalpha() and word_j.isalpha():\n return True\n return False", "def exactMatch(self, mention):\n w1 = self.allWords()\n w2 = mention.allWords()\n if len(w1) == len(w2) and w1 == w2:\n return True\n else:\n return False", "def can_recept(self, text, *args, **kwargs):\n for each_cur in self.flat_norm.keys():\n if each_cur.lower() in text.lower():\n return True\n\n else:\n return False", "def check(self, word: str) -> bool:\n for s in (word, word.lower(), word.capitalize()):\n if s in self.words or s in self.ignored_words:\n return True\n return False", "def similar_strings(s1, s2):\n w1 = set(re.split(r'\\W+', s1))\n w2 = set(re.split(r'\\W+', s2))\n threshold = len(w1) // 2 + 1\n return len(w1 & w2) >= threshold", "def similarL1a(a, b):\n for x in b:\n if x.lower() in a.lower():\n return True\n return False", "def is_word_common(self, word):\n if word in self.stopwords:\n return True\n if re.match(r'[a-zA-Z]+[a-zA-Z]$', word):\n word = self.lemmatizer.lemmatize(word, pos='n')\n synset = wn.synsets(word)\n if len(synset) > 0:\n return True\n else:\n return False\n return False", "def check_common_word(song: Song, result: Result) -> bool:\n\n sentence_words = slugify(song.name).split(\"-\")\n to_check = slugify(result.name).replace(\"-\", \"\")\n\n for word in sentence_words:\n if word != \"\" and word in to_check:\n return True\n\n return False", "def islower(self) -> bool:\n pass", "def _does_words_matches(original_word: str, encoded_word: str) -> bool:\n return(\n len(original_word) == len(encoded_word) and\n original_word[0] == encoded_word[0] and\n original_word[-1] == encoded_word[-1] and\n sorted(original_word[1:-1]) == sorted(encoded_word[1:-1])\n )", "def check_strings(aword, anotherword):\n if aword == anotherword:\n return True\n else:\n return False", "def _strings_are_equal(self, value1, value2):\n if value1.lower().strip().replace('_', ' ') == value2.lower().strip().replace('_', ' '):\n return 1\n return 0", "def similar_string(first_string, second_string):\n score = score_match(first_string, second_string)\n\n if score >= SCORE_THRESHOLD_NORMAL:\n return True\n\n return False", "def compare_words(self, word1, word2):\n return Counter(word1) == Counter(word2)", "def word_check(word):\n word1 = word[1:]\n if word1 not in word_dict: return False\n if not homophones (word, word1): return False\n \n \n word2 = word[0] + word[2:]\n if word2 not in word_dict: return False\n if not homophones(word, word2): return False\n\n return True", "def are_similar(left, right):\n left = left.lower()\n right = right.lower()\n if left == right:\n return True\n if left and left in right:\n return True\n if right and right in left:\n return True\n return False", "def robust_string_compare(a, b):\n return a.strip().lower() == b.strip().lower()", "def homophone_words(word_one, word_two, pron_dict):\n if word_one not in pron_dict or word_two not in pron_dict:\n return False\n return pron_dict[word_one] == pron_dict[word_two]", "def is_words_similar(string, model):\n\n if fuzz.ratio(string, model, score_cutoff=75):\n return True\n\n return False", "def detectCapitalUse(self, word: str) -> bool:\n if not word:\n return True\n\n head_upper = word[0].isupper()\n\n # except for the head\n has_lower = False\n has_upper = False\n for w in word[1:]:\n if w.isupper():\n has_upper = True\n if has_lower or not head_upper:\n return False\n else:\n has_lower = True\n if 
has_upper:\n return False\n return True", "def is_anagram(word1, word2):\n w1 = word1.lower().replace(' ', '')\n w2 = word2.lower().replace(' ', '')\n if len(w1) != len(w2):\n return False\n return all(x == y for x, y in zip(sorted(w1.lower()), sorted(w2.lower())))", "def rhymes(self,a,b):\r\n \r\n a=a.lower()\r\n b=b.lower()\r\n if(a in self._words): ##check if A is in the dict\r\n checkA=1\r\n soundA=self._pronun[a]\r\n lenA=len(soundA)\r\n #print(soundA)\r\n else :\r\n return False\r\n if(b in self._words): ##check if B is in dict\r\n checkB=1\r\n soundB=self._pronun[b]\r\n lenB=len(soundB)\r\n #print(soundB)\r\n else:\r\n return False\r\n \r\n if((checkA==1) and (checkB==1)): ##if both in dict then move ahead\r\n #print(lenA,lenB)\r\n \r\n for countA in range(lenA):\r\n if soundA[countA][0][0] not in ['A','E','I','O','U']:\r\n soundA[countA]=soundA[countA][1:]\r\n\r\n for countA in range(lenA):\r\n soundA[countA]=''.join(soundA[countA])\r\n \r\n # print(soundA)\r\n \r\n\r\n for countB in range(lenB):\r\n if soundB[countB][0][0] not in ['A','E','I','O','U']:\r\n soundB[countB]=soundB[countB][1:]\r\n\r\n for countB in range(lenB):\r\n soundB[countB]=''.join(soundB[countB])\r\n\r\n #print(soundB)\r\n \r\n else:\r\n return False\r\n\r\n rhyme_count=0\r\n \r\n for countA in range(lenA):\r\n for countB in range(lenB):\r\n if((soundA[countA].endswith(soundB[countB]))==True):\r\n #print('substring matched')\r\n rhyme_count=rhyme_count+1\r\n\r\n for countB in range(lenB):\r\n for countA in range(lenA):\r\n if((soundB[countB].endswith(soundA[countA]))==True):\r\n #print('substring matched')\r\n rhyme_count=rhyme_count+1\r\n \r\n if(rhyme_count>0):\r\n #print('True') \r\n return True\r\n else:\r\n # print('False')\r\n return False", "def rhymes(self, a, b):\n # match everything after the first 1 for all combinations\n # check if a is present in the CMU dictionary\n if a in self._pronunciations :\n a_pronunciations = self._pronunciations[a.lower()]\n else :\n return False\n \n # check if b is present in the CMU dictionary\n if b in self._pronunciations :\n b_pronunciations = self._pronunciations[b.lower()]\n else :\n return False\n \n \n ret_val = False\n a_string = \"\" \n b_string = \"\"\n for a_pronunciation in a_pronunciations:\n a_string = \"\" \n for phoneme in a_pronunciation : \n a_string += phoneme\n for b_pronunciation in b_pronunciations: \n b_string = \"\" \n for phoneme in b_pronunciation : \n b_string += phoneme\n \n if \"1\" in a_string :\n a_string = a_string[a_string.index(\"1\"):]\n if \"1\" in b_string :\n b_string = b_string[b_string.index(\"1\"):]\n\n if a_string == b_string : \n ret_val = True\n \n return ret_val", "def match(self, sentence) -> bool:\r\n for word in self.word_list:\r\n if word.lower() in sentence.lower():\r\n return True\r\n return False", "def str_islower__Rope(space, w_self):\n l = w_self._node.length()\n \n if l == 0:\n return space.w_False\n cased = False\n iter = rope.ItemIterator(w_self._node)\n for idx in range(l):\n c = iter.nextchar()\n if c.isupper():\n return space.w_False\n elif not cased and c.islower():\n cased = True\n return space.newbool(cased)", "def _is_same_color(p1: str, p2: str):\n return p1.islower() == p2.islower()", "def verify_anagrams(first_word, second_word):\n first_word = sorted(first_word.lower().replace(' ', '')) # list of lower letter in word without\n second_word = sorted(second_word.lower().replace(' ', '')) # whitespaces, sorted ascending\n\n if len(first_word) != len(second_word):\n return False\n else:\n if first_word != 
second_word:\n return False\n else:\n return True", "def verify(self, word):\n if len(word) < 2:\n return (True, word)\n\n if word.lower() in self.replacement_words.keys():\n return (True, self.replacement_words[word.lower()])\n\n if word.lower() in self.word_list:\n return (True, word)\n\n if word.lower() in self.ignored_words:\n return (True, word)\n\n return (False, word)", "def end_other(s_1, s_2):\n str_1 = s_1[-3:]\n str_2 = s_2[-3:]\n\n if(str_1.lower() == s_2.lower()):\n \n isValid = True\n elif(str_2.lower() == s_1.lower()):\n isValid = True\n else:\n isValid = False\n return isValid", "def isEqualNoCase(string1, string2):\n return (True)", "def one_away(w1, w2):\n\n if abs(len(w1) - len(w2) > 1):\n return False\n\n # i = 0\n # w1_d = {}\n # w2_d = {}\n\n # for i in w1:\n # w1_d[i] = w1.count(i)\n\n # for j in w2:\n # w2_d[j] = w2.count(j)\n\n # unmatched = set(w1_d.items())^set(w2_d.items())\n \n # if len(unmatched) > 2:\n # return False\n # return True\n \n if len(w2) > len(w1):\n w1, w2 = w2, w1\n\n # Keep track of number of wrong letters\n diff = 0\n\n # Loop over w1 with i and over w2 with j\n i = j = 0\n\n # while j < len(w2):\n\n # if w1[i] != w2[j]:\n\n # # We found a wrong letter\n # wrong += 1\n # # We'll move to the next char in the longer string.\n # i += 1\n # if wrong > 1:\n # return False\n\n # # If same length, move the next char in shorter.\n # # Otherwise, don't move in shorter string --- this\n # # will cover the case of a added letter.\n # if len(w1) == len(w2):\n # j += 1\n\n # else:\n # # Both letters match; move to next letter in both\n # i += 1\n # j += 1\n\n # return True\n\n # iterate over 1 word - shorter of the two, so there is no index out of range error\n # as i, j increments\n while j < len(w2):\n # if letter are different, add to diff variable\n if w1[i] != w2[j]:\n diff += 1\n # as soon as diff is more than 1, than it's fast fail\n if diff > 1:\n return False\n # two scenarios: if same length for both words, both go on check next \n # word\n if len(w1) == len(w2):\n i += 1\n j += 1\n \n else: #if one word is longer than the other, go on to next letter in \n # longer word, and see if it matches previous letter in shorter word\n # because this is a case where extra letter is added in the middle of long\n # word, but the rest should be the same as the shorter\n i += 1\n else:\n i += 1\n j += 1\n return True", "def arrayStringsAreEqual1(self, word1: List[str], word2: List[str]) -> bool:\n word1str = ''.join(word1)\n word2str = ''.join(word2)\n return word1str == word2str", "def is_palindrome(word: str) -> bool:\n\n # Todo\n return False", "def check_string(str_one, str_two):\n str_one = str_one.lower()\n str_two = str_two.lower()\n # print(str_one,str_two)\n if len(str_two) < len(str_one):\n return bool(re.search(str_two+'$',str_one))\n else:\n return bool(re.search(str_one+'$',str_two))", "def valid(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(series_of_words.pop())\n for word in series_of_words:\n if word in words:\n return False\n words.append(word)\n return True", "def get_word_distance(word1: str, word2: str) -> bool:\n\n letters_different = 0\n for x, y in zip(word1, word2):\n if x != y:\n letters_different += 1\n if letters_different > 1:\n return False\n\n return True", "def is_palindrome(sentence: str) -> bool:\n\n words = extract_words(sentence)\n middle = floor(len(words)/2)\n\n for index in range(middle):\n if words[index].lower() != words[len(words)-index-1].lower():\n return False\n\n return True", "def 
is_canonical(hybrids):\n mrhyb = hybrids[2].upper().replace(\"U\", \"T\")\n mirhyb = hybrids[0].upper().replace(\"U\", \"T\")\n hybrid = hybrids[1]\n \"\"\"\n 2-8\n \"\"\"\n if hybrid[1:8] == \"|||||||\":\n guwoble = False\n for mirnuc, mrnuc in zip(mirhyb[1:8], mrhyb[1:8]):\n if (mirnuc == 'G' and mrnuc == 'T') or (mirnuc == 'T' and mrnuc == 'G'):\n guwoble = True\n if guwoble:\n return False, \"2-8-Gwoble\"\n else:\n return True, \"2-8\"\n elif (hybrid[1:7] == \"||||||\" and mrhyb[0] == 'A'):\n guwoble = False\n for mirnuc, mrnuc in zip(mirhyb[1:7], mrhyb[1:7]):\n if (mirnuc == 'G' and mrnuc == 'T') or (mirnuc == 'T' and mrnuc == 'G'):\n guwoble = True\n if guwoble:\n return False, \"2-7-A-Gwoble\"\n else:\n return True, \"2-7-A\"\n else:\n if hybrid[0:7] == \"|||||||\":\n return False, \"1-7-ElMMo\"\n elif hybrid[1:7] == \"||||||\":\n return False, \"6-mer\"\n if \"v\" in hybrid[0:8]:\n return False, \"mRNAbulge\"\n elif \"^\" in hybrid[0:8]:\n return False, \"miRNAbulge\"\n elif \"O\" in hybrid[0:8]:\n return False, \"symmetric_loop\"\n else:\n return False, \"unknown\"", "def compare_plaintext(self, a, b):\n a = self.make_comparable(a)\n b = self.make_comparable(b)\n return b.startswith(a) or a.startswith(\n b\n ) # i.e. ignore any final random letters", "def include_word(word, chardict):\n if (all(char in chardict.keys() for char in word)) & (len(word)<=25):\n # Some word2vec entries are all capitals and generally are acronyms.\n # This is unlikely to be learnable\n if not word.isupper():\n return True\n\n return False", "def match(self, words):\n return words == self.words(len(words))", "def is_lower(self):\n return self.rep.is_lower()", "def check_words(dictionary_, start_word, stop_word):\n if dictionary_.is_real_word(start_word) is False:\n print(\"Word {} not found in the dictionary\".format(start_word))\n return False\n if dictionary_.is_real_word(stop_word) is False:\n print(\"Word {} not found in the dictionary\".format(stop_word))\n return False\n return True", "def check_word(self, word):\n word = word.lower().strip()\n return not word or word in self.dictionary", "def islower(self):\n return islower(self)", "def check_anagrams(self, sequence1, sequence2):\n if len(sequence1) != len(sequence2):\n # If length is not the same return false\n return False\n else:\n # Assumes case doesn't matter by default\n return self.compare_words(sequence1.lower(), sequence2.lower())", "def is_anagram(word1: str, word2: str):\n word1 = word1.lower().replace(' ', '')\n word2 = word2.lower().replace(' ', '')\n chars_1 = [char for char in word1]\n chars_2 = [char for char in word2]\n\n if (all(char in word2 for char in chars_1) and\n all(char in word1 for char in chars_2) and\n len(word1) == len(word2)):\n return True\n else:\n return False", "def is_a_word(self, word):\n word = word.lower()\n if word in self.data:\n return True\n else:\n # for char in word:\n # if char.isnumeric():\n # return True\n word = list(word)\n numbers = len([x for x in word if x.isnumeric()])\n # # letters = len([x for x in word if x.isalpha()])\n if numbers >= 2 or numbers/len(word) > 0.4:\n return True\n return False", "def is_consonant(text):\n return text.lower() in AVRO_CONSONANTS", "def two_word_finder(word1,word2,text):\r\n word1 = word1.lower()\r\n word2 = word2.lower()\r\n text = str(text).lower()\r\n if word1 and word2 in text:\r\n return True #return text to see specific tweets\r\n return False", "def filter1(word):\n if not word: return False\n w = word.lower()\n if w in STOPWORDS: return False\n return 
True", "def check_sentence_sanity(self, sentence):\n case_dist = nltk.FreqDist()\n\n for token in sentence:\n case_dist[self.get_casing(token)] += 1\n\n if case_dist.most_common(1)[0][0] != \"allLower\":\n return False\n\n return True", "def is_anagram(s1, s2):\n s1 = s1.lower()\n s2 = s2.lower()\n if (sorted(s1) == sorted(s2)):\n return True\n else:\n return False", "def allcap_differential(words):\n is_different = False\n allcap_words = 0\n for word in words:\n if word.isupper():\n allcap_words += 1\n cap_differential = len(words) - allcap_words\n if 0 < cap_differential < len(words):\n is_different = True\n return is_different", "def is_case_sensitive(text):\n return text.lower() in AVRO_CASESENSITIVES", "def good_word(self, word):\r\n return word.strip().lower()", "def testTwoWords(self):\n\n\t\t\t\twords = ['business', 'directory']\n\t\t\t\tsynonyms = spinner.Synonym.objects.get_synonyms(words)\n\n\t\t\t\tassert len(synonyms)", "def is_palindrome_ingoring_case_and_non_letter_chars(text):", "def split_precondition(\n tokens: Sequence[str], words: Sequence[str], word_ends: Sequence[str]\n) -> bool:\n duplicated_word_ends = []\n for end1, end2 in zip(word_ends, word_ends[1:]):\n if end1 == end2:\n duplicated_word_ends.append(end1)\n\n if not duplicated_word_ends:\n return False\n\n duplicate_not_word = False\n for duplicate in duplicated_word_ends:\n if duplicate not in words:\n duplicate_not_word = True\n break\n\n if not duplicate_not_word:\n return False\n\n return True", "def isValidTest(self):\n if not self.hasError():\n return False\n distance = dameraulevenshtein(self.word, self.error) \n if(distance > 1):\n return False\n regex = '.*[^a-zA-Z].*'\n if re.match(regex, self.word) or re.match(regex, self.error):\n return False\n return True", "def is_valid(self, text):\n return any(p.lower() in text.lower() for p in self.get_phrases())", "def detectCapitalUse(self, word):\n\n # Check for no upper or all upper\n if all(l.isupper() for l in word) or all(l.islower() for l in word):\n return True\n elif word[0].isupper() and word[1:].islower():\n return True\n else:\n return False", "def test_words_closer_than(self):\n self.assertEqual(self.vectors.words_closer_than('dog.n.01', 'dog.n.01'), [])\n expected = set(['canine.n.02', 'hunting_dog.n.01'])\n self.assertEqual(set(self.vectors.words_closer_than('dog.n.01', 'carnivore.n.01')), expected)", "def check_word_capitalization(word):\n return_value = False\n if (len(word) > 1):\n return_value = True if (word[0].isupper() and word[1].islower()) else False\n return return_value", "def words_in_dictionary(word_list):\n for word in word_list:\n word = word.lower()\n raw_word = word.replace(\"'\", '').replace('.', '')\n if word not in DICTIONARY_LOWER and raw_word not in DICTIONARY_LOWER:\n return False\n return True", "def is_british_english_term(word: str) -> bool:\n word = process_word(word)\n return word in BRITISH_ENGLISH_ONLY_TERMS", "def rhymes(self, a, b):\n\n return False", "def is_anagram(word1, word2):\n return letters_in(word1) == letters_in(word2)\n\n # We'll need to normalize our unicode data to such that characters are decomposed into parts (so accents are treated separately from the character they accent). If we look up unicode normalization or search for how to ignore accent marks in unicode strings we'll find NFKD form. 
The unicodedata module can help us normalize our strings into NFKD form (NFD form should work as well):", "def lower_in_title(word, filename):\n title = get_title(filename)\n if word.lower() in title.lower():\n return True\n else:\n return False", "def twoStrings(s1, s2):\n\n set1 = set(s1)\n set2 = set(s2)\n\n for char in set1:\n if char in set2:\n return True\n\n return False", "def can_generate_ransom_note(self):\n if self.ransom_text == '' or self.ransom_text == ' ':\n return True\n ransom_text_words = self.ransom_text.split(' ')\n magazine_text_words = self.magazine_text.split(' ')\n # counting the occurrences of words in the ransom and magazine texts.\n ransom_count = self._count_words_in_string(ransom_text_words)\n magazine_count = self._count_words_in_string(magazine_text_words)\n result = False\n for i in ransom_text_words:\n # if magazine_count hashmap doesn't have word\n if magazine_count.get(i) is None:\n result = False\n break\n # if ransom_count hashmap have less word occurances than magazine count.\n if ransom_count.get(i) <= magazine_count.get(i):\n result = True\n else:\n result = False\n break\n return result", "def areSentencesSimilarTwo(self, words1, words2, pairs):\n if not words1 and not words2:\n return True\n elif not words1 or not words2:\n return False\n elif not pairs and not self.compare(words1, words2):\n return False\n\n vocab = set()\n for w in words1:\n vocab.add(w)\n for w in words2:\n vocab.add(w)\n for (w1, w2) in pairs:\n vocab.add(w1)\n vocab.add(w2)\n\n parents = {w:w for w in vocab}\n ranks = {w:1 for w in vocab}\n\n for w1, w2 in pairs:\n pw1 = self.find(w1, parents)\n pw2 = self.find(w2, parents)\n if pw1 == pw2:\n continue\n\n if ranks[pw1] > ranks[pw2]:\n pw1, pw2 = pw2, pw1\n parents[pw1] = pw2\n ranks[pw2] += ranks[pw1]\n\n pwlist1 = []\n for w in words1:\n pw = self.find(w, parents)\n pwlist1.append(pw)\n\n pwlist2 = []\n for w in words2:\n pw = self.find(w, parents)\n pwlist2.append(pw)\n\n return self.compare(pwlist1, pwlist2)", "def replacement_allowed(self, word):\n not_list = ['was', 'were', 'is', 'are', 'have', 'has', 'had']\n for not_word in not_list:\n if word == not_word:\n return False\n return True", "def win_condition(self):\n if self.letters_wrong < 5:\n if '__ ' in self.new_string:\n return False\n else:\n return True\n else:\n return True", "def text_compare(t1, t2):\n if not t1 and not t2:\n return True\n if t1 == '*' or t2 == '*':\n return True\n return (t1 or '').strip() == (t2 or '').strip()", "def palindrome(string: str) -> Bool:\n string_lower = string.lower()\n string_clean =", "def test_equality(self):\n\n # change .phones\n pw1: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n pw2: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"P\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n self.assertNotEqual(pw1, pw2)\n\n # change .stress_pattern\n pw1: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n pw2: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n 
Stress.NO_STRESS,\n Stress.NON_VOWEL,\n ],\n )\n self.assertNotEqual(pw1, pw2)", "def is_anagram(first, second):\r\n first = sorted(first.replace(\" \", \"\").lower())\r\n second = sorted(second.replace(\" \", \"\").lower())\r\n return first == second", "def misspelled_hometown(town_a, town_b, threshold=0.8):\n if fuzz.ratio(town_a, town_b) > threshold or fuzz.partial_token_set_ratio(town_a, town_b) > threshold:\n return True\n else:\n return False", "def valid_anagram(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(''.join(sorted(series_of_words.pop())))\n for word in series_of_words:\n word = ''.join(sorted(word))\n if word in words:\n return False\n words.append(word)\n return True", "def verify_anagrams(first_word, second_word):\n first = prep_dict(first_word)\n second = prep_dict(second_word)\n #print first, second\n for k,v in first.iteritems():\n try:\n if second[k] == v:\n pass\n else:\n return False\n except:\n return False\n return True", "def is_limerick(self, text):\n \n sentences = text.splitlines()\n \n #remove blank setences\n sentences = [sentence for sentence in sentences if sentence.strip()] \n \n if len(sentences) != 5 : return False \n #remove punctuations for all sentences\n words_sentence1 = word_tokenize(sentences[0].translate(None, string.punctuation).lower())\n words_sentence2 = word_tokenize(sentences[1].translate(None, string.punctuation).lower())\n words_sentence3 = word_tokenize(sentences[2].translate(None, string.punctuation).lower())\n words_sentence4 = word_tokenize(sentences[3].translate(None, string.punctuation).lower())\n words_sentence5 = word_tokenize(sentences[4].translate(None, string.punctuation).lower())\n \n #check rhymes for AAA BB and not rhymes for AB\n ret_flag = (self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence2[len(words_sentence2) - 1]) and\n self.rhymes(words_sentence3[len(words_sentence3) - 1 ],\n words_sentence4[len(words_sentence4) - 1 ]) and\n self.rhymes(words_sentence2[len(words_sentence2) - 1 ],\n words_sentence5[len(words_sentence5) - 1 ]) and\n self.rhymes(words_sentence1[len(words_sentence1) - 1 ],\n words_sentence5[len(words_sentence5) - 1 ]) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence4[len(words_sentence4) - 1])))\n \n if ret_flag == False: return False\n \n \n # Check additional constraints\n \n sum_of_syl1 = 0\n for word in words_sentence1 : sum_of_syl1 += self.num_syllables(word)\n \n if sum_of_syl1 < 4 : return False\n sum_of_syl2 = 0\n for word in words_sentence2 : sum_of_syl2 += self.num_syllables(word)\n \n if sum_of_syl2 < 4 : return False\n \n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl2 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl2\n else : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl3 = 0\n for word in words_sentence3 : sum_of_syl3 += self.num_syllables(word)\n \n if sum_of_syl3 < 4 : return False\n 
sum_of_syl4 = 0\n for word in words_sentence4 : sum_of_syl4 += self.num_syllables(word)\n \n if sum_of_syl4 < 4 : return False\n \n \n sum_of_syl_B_diff = 0\n if sum_of_syl3 > sum_of_syl4 : sum_of_syl_B_diff = sum_of_syl3 - sum_of_syl4\n else : sum_of_syl_B_diff = sum_of_syl4 - sum_of_syl3\n \n if sum_of_syl_B_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl1 and sum_of_syl3 > sum_of_syl2 \n and sum_of_syl4 > sum_of_syl1 and sum_of_syl4 > sum_of_syl2) : return False\n \n \n sum_of_syl5 = 0\n for word in words_sentence5 : sum_of_syl5 += self.num_syllables(word) \n \n if sum_of_syl5 < 4 : return False\n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl5\n else : sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl_A_diff = 0\n if sum_of_syl2 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl5\n else : sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl2\n \n \n if sum_of_syl_A_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl5 and sum_of_syl4 > sum_of_syl5) : return False\n \n \n return ret_flag", "def not_equal_to_case_insensitive(self, other_string):\n return self.value.lower() != other_string.lower()", "def is_anagram(a,b):\n #rhis function will take 2 text and check if they are anagrams return True, if they're not return False.\n list_ch_a,list_ch_b = [],[]\n for ch in a:\n if ch != \" \":\n list_ch_a += ch.lower()\n for ch in b:\n if ch != \" \":\n list_ch_b += ch.lower()\n if sorted(list_ch_a) == sorted(list_ch_b):\n return True\n else:\n return False", "def is_word(wordlist, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\:;'<>?,./\\\"\")\n return word in wordlist", "def _likely_yelling_in(content):\n return (\n # All upper case is yelling\n (content == content.upper())\n \n # But no letters at all means otherwise\n and (content.lower() != content.upper()) \n )", "def test_normalize_phrase(self):\n test_pairs = [\n [\"Commissioner v. Palin\", \"palin\"],\n [\"Commr v. Palin\", \"palin\"],\n [\"Comm'r v. Palin\", \"palin\"],\n [\n \"United States v. Learned Hand et. al.\",\n \"unitedstateslearnedhand\",\n ],\n [\"Baker, Plaintiff v. 
Palin, Defendant\", \"bakerpalin\"],\n ]\n for pair in test_pairs:\n self.assertEqual(\n normalize_phrase(harmonize(clean_string(pair[0]))), pair[1]\n )", "def is_palindrome(word):\n word = word.replace(\" \", \"\")\n new_word = word.lower()\n\n return new_word == new_word[::-1]", "def str_istitle__Rope(space, w_self):\n cased = False\n previous_is_cased = False\n\n iter = rope.ItemIterator(w_self._node)\n for pos in range(0, w_self._node.length()):\n ch = iter.nextchar()\n if ch.isupper():\n if previous_is_cased:\n return space.w_False\n previous_is_cased = True\n cased = True\n elif ch.islower():\n if not previous_is_cased:\n return space.w_False\n cased = True\n else:\n previous_is_cased = False\n\n return space.newbool(cased)", "def is_cap_word(self, word):\n try:\n return word[0].isupper()\n except:\n return False", "def equalPrefix(self, other):\n return self.prefix.strip().upper() == other.prefix.strip().upper() and \\\n self.suffix.strip().upper() == other.suffix.strip().upper()", "def string_palidrome(word):\n if word == string_reverse(word):\n return True\n else:\n return False", "def _is_duplicate(a: str, b: str) -> bool:\n la = len(a)\n lb = len(b)\n diff = abs(la - lb)\n if diff > 50:\n return False\n denom = min(la, lb) + diff / 2\n ratio = levenshtein(a.casefold(), b.casefold()) / denom\n return ratio < 0.1", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def check_word(self, word):\n first_letter, rest = word[0], word[1:]\n\n for possible_start in self._find_letter(first_letter):\n if self._check_word(possible_start, rest):\n return True\n\n return False", "def is_stopword(self, word, language):", "def hasConstantForm(self, sentence):", "def is_word(self, sequence: str) -> bool:\n\n for non_terminal in self._non_terminals:\n if non_terminal in sequence:\n return False\n\n return True", "def exactSetMatch(self, mention, ignoreSemanticTagList=[]):\n \n dWords = self.importantWords(ignoreSemanticTagList)\n aWords = mention.importantWords(ignoreSemanticTagList) \n\n if len(aWords) == 0:\n # annotated mention consists of \"unimportant\" words.\n # use all words in mention\n dWords = self.allWords()\n aWords = mention.allWords() \n\n if len(dWords) > 0 and dWords == aWords:\n return True\n else:\n return False" ]
[ "0.71012914", "0.690699", "0.6702049", "0.6633581", "0.6579332", "0.6557945", "0.6547502", "0.6540323", "0.6418574", "0.6399188", "0.6355479", "0.6346227", "0.6344378", "0.6340427", "0.6335884", "0.6317267", "0.6293736", "0.62759924", "0.6260704", "0.62522906", "0.6198525", "0.6191492", "0.6181529", "0.6165622", "0.6151826", "0.61323166", "0.61249346", "0.61245036", "0.6099195", "0.60808265", "0.60606", "0.6025732", "0.6023552", "0.6023469", "0.60234255", "0.6023282", "0.6019106", "0.6015093", "0.5997458", "0.598565", "0.5977427", "0.5973034", "0.5972417", "0.5963119", "0.59603035", "0.59587747", "0.5956348", "0.59536886", "0.5940382", "0.5925882", "0.5919809", "0.5918063", "0.59032375", "0.58936584", "0.58834", "0.58770394", "0.5872879", "0.58304137", "0.58286047", "0.58230174", "0.5791833", "0.57885474", "0.5782202", "0.5772227", "0.5766659", "0.57586336", "0.57575846", "0.57427907", "0.57233715", "0.57181716", "0.57135737", "0.5708162", "0.5702751", "0.5691268", "0.56909925", "0.5667827", "0.5665647", "0.56604254", "0.5658598", "0.5629111", "0.5619538", "0.5619354", "0.5618503", "0.56136155", "0.5613466", "0.5613288", "0.5609863", "0.560582", "0.5600137", "0.5593581", "0.5593458", "0.559265", "0.55879086", "0.55875736", "0.5587317", "0.5569231", "0.55617636", "0.5558195", "0.55567807", "0.55525655" ]
0.60315377
31
Takes text where lines are separated by newline characters. Returns True if the text is a limerick, False otherwise. A limerick is defined as a poem with the form AABBA, where the A lines rhyme with each other, the B lines rhyme with each other, and the A lines do not rhyme with the B lines.
def is_limerick(self, text): # TODO: provide an implementation! text = text.lower() p = [] p = text.split('\n') p = [i.strip(' ') for i in p] p = list(filter(None, p)) # all limericks must have 5 lines AABBA if len(p) != 5: return False #words list stores the list of words in each line of the limerick words = [] for i in range(0, 5): p[i] = p[i].strip(".,:;?!") temp = [] T = p[i] temp = self.apostrophe_tokenize(T) words.append(temp) count = [] #print len(words) for i in range(0, 5): #print words[i] n = 0 for j in words[i]: n = n + self.num_syllables(j) count.append(n) # check if any line has fewer than 4 syllables for i in count: if i < 4: return False A1 = count[0] A2 = count[1] B1 = count[2] B2 = count[3] A3 = count[4] # check if B1 has fewer syllables than A1, A2 and A3 if B1 > A1 or B1 > A2 or B1 > A3: return False # check if B2 has fewer syllables than A1, A2 and A3 if B2 > A1 or B2 > A2 or B2 > A3: return False # check if the no of syllables in B1 and B2 differs by more than 2 if abs(B1 - B2) > 2: return False # check if any two A's differ in no of syllables by more than 2 if abs(A1 - A2) > 2 or abs(A1 - A3) > 2 or abs(A2 - A3) > 2: return False #check if A1, A2 and A3 rhyme with each other if self.rhymes(words[0][-1], words[1][-1]) and self.rhymes(words[0][-1], words[4][-1]) and self.rhymes(words[1][-1], words[4][-1]): #check if B1 and B2 rhyme with each other if self.rhymes(words[2][-1],words[3][-1]): #check if A and B do not rhyme if (not self.rhymes(words[0][-1], words[2][-1]) and not self.rhymes(words[0][-1], words[3][-1]) and not self.rhymes(words[1][-1], words[2][-1]) and not self.rhymes(words[1][-1], words[3][-1]) and not self.rhymes(words[4][-1], words[2][-1]) and not self.rhymes(words[4][-1], words[3][-1]) ): return True return False
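For readers tracing the positive document above: it delegates to self.num_syllables and self.rhymes, which are not shown in this record. Below is a minimal sketch of such helpers, assuming NLTK's CMU Pronouncing Dictionary is available; the helper names and the "match from the last stressed vowel onward" rhyme rule are illustrative assumptions, not the original author's exact code.

# Minimal sketch (not the original helpers): syllable counting and rhyme
# testing backed by NLTK's cmudict, in the shape is_limerick above expects.
# One-time setup assumed: nltk.download('cmudict')
from nltk.corpus import cmudict

_pron = cmudict.dict()  # word -> list of pronunciations (lists of phoneme strings)

def num_syllables(word):
    """Syllable count of the shortest pronunciation; 1 if the word is unknown."""
    prons = _pron.get(word.lower())
    if not prons:
        return 1  # assumption: treat out-of-vocabulary words as one syllable
    # Vowel phonemes carry a stress digit (e.g. AH0, IY1); consonants do not.
    return min(sum(ph[-1].isdigit() for ph in p) for p in prons)

def rhymes(a, b):
    """True if some pronunciations of a and b agree from the last stressed vowel on."""
    def tails(word):
        out = set()
        for p in _pron.get(word.lower(), []):
            stressed = [i for i, ph in enumerate(p) if ph[-1] in "12"]
            out.add(tuple(p[stressed[-1] if stressed else 0:]))
        return out
    return bool(tails(a) & tails(b))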
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_limerick(self, text):\n\n return False", "def is_limerick(self, text):\n \n sentences = text.splitlines()\n \n #remove blank setences\n sentences = [sentence for sentence in sentences if sentence.strip()] \n \n if len(sentences) != 5 : return False \n #remove punctuations for all sentences\n words_sentence1 = word_tokenize(sentences[0].translate(None, string.punctuation).lower())\n words_sentence2 = word_tokenize(sentences[1].translate(None, string.punctuation).lower())\n words_sentence3 = word_tokenize(sentences[2].translate(None, string.punctuation).lower())\n words_sentence4 = word_tokenize(sentences[3].translate(None, string.punctuation).lower())\n words_sentence5 = word_tokenize(sentences[4].translate(None, string.punctuation).lower())\n \n #check rhymes for AAA BB and not rhymes for AB\n ret_flag = (self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence2[len(words_sentence2) - 1]) and\n self.rhymes(words_sentence3[len(words_sentence3) - 1 ],\n words_sentence4[len(words_sentence4) - 1 ]) and\n self.rhymes(words_sentence2[len(words_sentence2) - 1 ],\n words_sentence5[len(words_sentence5) - 1 ]) and\n self.rhymes(words_sentence1[len(words_sentence1) - 1 ],\n words_sentence5[len(words_sentence5) - 1 ]) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence4[len(words_sentence4) - 1])))\n \n if ret_flag == False: return False\n \n \n # Check additional constraints\n \n sum_of_syl1 = 0\n for word in words_sentence1 : sum_of_syl1 += self.num_syllables(word)\n \n if sum_of_syl1 < 4 : return False\n sum_of_syl2 = 0\n for word in words_sentence2 : sum_of_syl2 += self.num_syllables(word)\n \n if sum_of_syl2 < 4 : return False\n \n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl2 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl2\n else : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl3 = 0\n for word in words_sentence3 : sum_of_syl3 += self.num_syllables(word)\n \n if sum_of_syl3 < 4 : return False\n sum_of_syl4 = 0\n for word in words_sentence4 : sum_of_syl4 += self.num_syllables(word)\n \n if sum_of_syl4 < 4 : return False\n \n \n sum_of_syl_B_diff = 0\n if sum_of_syl3 > sum_of_syl4 : sum_of_syl_B_diff = sum_of_syl3 - sum_of_syl4\n else : sum_of_syl_B_diff = sum_of_syl4 - sum_of_syl3\n \n if sum_of_syl_B_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl1 and sum_of_syl3 > sum_of_syl2 \n and sum_of_syl4 > sum_of_syl1 and sum_of_syl4 > sum_of_syl2) : return False\n \n \n sum_of_syl5 = 0\n for word in words_sentence5 : sum_of_syl5 += self.num_syllables(word) \n \n if sum_of_syl5 < 4 : return False\n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl5\n else : sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl_A_diff = 0\n if sum_of_syl2 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl5\n else : 
sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl2\n \n \n if sum_of_syl_A_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl5 and sum_of_syl4 > sum_of_syl5) : return False\n \n \n return ret_flag", "def is_line_on_multiline(feature_1: Sequence, feature_2: Sequence) -> bool:\n return any(is_line_on_line(feature_1, coords_2) for coords_2 in feature_2)", "def test_LogicalLines(self) -> None:\n content = \"\"\"\nfoo \\\\\nbar \\\\\nbaz\nfoo\nbling \\\\\nbling \\\\ bling\nbling\n\"\"\"\n fobj = io.StringIO(content)\n lines = LogicalLines(fobj).readlines()\n assert lines == [\n '\\n',\n 'foo bar baz\\n',\n 'foo\\n',\n 'bling bling \\\\ bling\\n',\n 'bling\\n',\n ], lines", "def treat_new_line(self,text):\n text=text.replace('.\\n','. ')\n text=re.sub(r'(\\n\\s*)+\\n+', '\\n\\n',text )\n \n lw=text.split('\\n\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n \n for i in range(1,len(lw)):\n try:\n\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','') !='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n\n\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1][-1].replace(' ','')!='':\n\n if lw[i-1][-1].replace(' ','')[-1]!='-':\n lw[i-1]+=\"\"\n else:\n\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n else:\n lw[i-1]+=\"\\n\\n\"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n \n text=\"\".join(lw)\n \n lw=text.split('\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n for i in range(1,len(lw)):\n try:\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1]==\"-\":\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n\n\n\n else:\n lw[i-1]+=\" \"\n else:\n 
lw[i-1]+=\" \"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n text=\"\".join(lw)\n return text", "def IsMultiline(self):\r\n\r\n return \"\\n\" in self.caption", "def isline(l):\n return isinstance(l,list) and len(l) == 2 \\\n and ispoint(l[0]) and ispoint(l[1])", "def lines_to_blocks(text):\n n_sep = text.count('\\n\\n')\n n_lines = text.count('\\n')\n #approximate ratio of double newlines vs single newline: 40\n if int(n_sep/n_lines*100) > 40:\n text = re.sub('\\n\\n', '\\n',text)\n #try to split it up with topic indicators such as numbers or bullet points\n text = re.sub(r'[0-9]+[.]', '\\n',text)\n text = re.sub('•', '\\n',text)\n return text", "def _has_newline(line) -> bool:\n if line and (\"\\r\" in line or \"\\n\" in line):\n return True\n return False", "def is_sonnet(poem):\n return len([line for line in poem.split(\"\\n\") if line]) == 14", "def detect_nl(string_or_lines, line_end=None):\n if line_end is None:\n line_end = '\\n' if (string_or_lines and\n string_or_lines[-1].endswith('\\n')) else ''\n return line_end", "def is_text(line, start, end, line_number, code_blocks):\n if any(c[0] <= line_number <= c[1] for c in code_blocks):\n return False\n else:\n n = len(line)\n idx = -1\n last_block_was_text = False\n in_link = False\n in_url = False\n while idx < start:\n if in_link:\n link_idx = line[idx+1:].find(')')\n assert link_idx != -1\n code_idx = n\n url_idx = n\n elif in_url:\n url_idx = line[idx+1:].find('>')\n assert url_idx != -1\n code_idx = n\n link_idx = n\n else:\n code_idx = line[idx+1:].find('`')\n link_idx = line[idx+1:].find('](')\n url_idx = line[idx+1:].find('<')\n if code_idx == -1:\n code_idx = n\n if link_idx == -1:\n link_idx = n\n if url_idx == -1:\n url_idx = n\n\n nearest_match = min(code_idx, link_idx, url_idx)\n\n if nearest_match == url_idx:\n in_url = not in_url\n elif nearest_match == link_idx:\n in_link = not in_link\n idx += nearest_match+1\n last_block_was_text = not last_block_was_text\n\n return last_block_was_text", "def match_multiline(self, text, delimiter, in_state, style):\n # If inside triple-single quotes, start at 0\n if self.previousBlockState() == in_state:\n start = 0\n add = 0\n # Otherwise, look for the delimiter on this line\n else:\n start = delimiter.indexIn(text)\n # Move past this match\n add = delimiter.matchedLength()\n\n # As long as there's a delimiter match on this line...\n while start >= 0:\n # Look for the ending delimiter\n end = delimiter.indexIn(text, start + add)\n # Ending delimiter on this line?\n if end >= add:\n length = end - start + add + delimiter.matchedLength()\n self.setCurrentBlockState(0)\n # No; multi-line string\n else:\n self.setCurrentBlockState(in_state)\n length = len(text) - start + add\n # Apply formatting\n self.setFormat(start, length, self.styles[style])\n # Look for the next match\n start = delimiter.indexIn(text, start + length)\n\n # Return True if still inside a multi-line string, False otherwise\n if self.currentBlockState() == in_state:\n return True\n else:\n return False", "def make_line(line, n_syl, syl_counts):\n\n # Current number of syllables in constructed line.\n # This includes the syllable count of the first word.\n curr = 0\n\n # Now, since the list is reversed, the last word of the actual sonnet\n # line is the first word of 'line'. 
So we want to check if this\n # word can be counted as one syllable.\n\n # Number of syllable in first word (last word of actual line)\n init_syl = syl_counts[line[0]]\n init_syl_alt = init_syl\n\n # Alternative syllable count\n if ((line[0] + '_') in syl_counts):\n init_syl_alt = syl_counts[line[0] + '_']\n\n for i in range(1, n_syl):\n if line[i] not in syl_counts:\n return (False, '')\n\n w_syl = syl_counts[line[i]]\n\n if init_syl + curr + w_syl and init_syl_alt + curr + w_syl > n_syl:\n return (False, '')\n if init_syl+ curr + w_syl == n_syl or init_syl_alt + curr + w_syl == n_syl:\n return (True, ' '.join(line[:i+1]))\n curr += w_syl", "def is_line(self): \n return False", "def is_line(self):\n return True", "def is_line(self):\n return True", "def handleNewLineBeforeEditor(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n # autoIndent, autoBullet, autoUnbullet\r\n \r\n return True", "def is_line_on_line(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n line_on_line = False\n\n for coords in feature_1:\n\n line_on_line = boolean_point_on_line(coords, feature_2)\n if not line_on_line:\n break\n\n return line_on_line", "def test_format_linebreaks():\r\n test_cases = (\r\n ('Simple:\\n\\nLine two', '<p>Simple:</p><p>Line two</p>'),\r\n ('DOS:\\r\\n\\r\\nLine breaks', '<p>DOS:</p><p>Line breaks</p>'),\r\n ('Classic Mac:\\r\\rLine breaks', '<p>Classic Mac:</p><p>Line breaks</p>'),\r\n ('Consecutive:\\n\\n\\n\\n\\n\\nLine breaks', '<p>Consecutive:</p><p>Line breaks</p>'),\r\n ('Multiple:\\r\\n\\r\\nLine\\r\\n\\r\\nbreaks', '<p>Multiple:</p><p>Line</p><p>breaks</p>'),\r\n ('\\nLeading and trailing\\n', '<p>Leading and trailing</p>'),\r\n ('Single\\ndoesn\\'t wrap', '<p>Single\\ndoesn\\'t wrap</p>'),\r\n ('Quote:\\n\\n<blockquote>(1) One\\n\\n(2) Two</blockquote>\\n\\nAfter',\r\n '<p>Quote:</p><blockquote><p>(1) One</p><p>(2) Two</p></blockquote><p>After</p>'),\r\n ('Quote 2:\\n\\n<blockquote>(1) One\\n\\n(2) Two\\n</blockquote>\\n\\nAfter',\r\n '<p>Quote 2:</p><blockquote><p>(1) One</p><p>(2) Two\\n</p></blockquote><p>After</p>'),\r\n )\r\n for input_text, expected_output in test_cases:\r\n yield is_equal, format_linebreaks(input_text), expected_output", "def blocks_are_equal(i, j, types, text, n):\n while i < n and j < n:\n if text[i] == text[j]:\n if is_lms(i, types) and is_lms(j, types):\n return True\n else:\n i += 1\n j += 1\n else:\n return False\n return False", "def single_line_paragraph(s: str) -> bool:\n return s.startswith('@') or s.strip() in ('\"\"\"', \"'''\")", "def is_line(self):\n return False", "def is_multipoint_on_linestring(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n points_on_line = False\n\n points_on_line = all(\n boolean_point_on_line(coords_1, feature_2[1]) for coords_1 in feature_1[1]\n )\n\n if not points_on_line:\n return points_on_line\n\n points_on_line = any(\n boolean_point_on_line(coords_1, feature_2[1], {\"ignoreEndVertices\": True})\n for coords_1 in feature_1[1]\n )\n\n return points_on_line", "def logicalLines(iterable, **kwargs):\n # kwargs\n kwargs = lowerKeys(kwargs)\n continueChar = kwargs.get('continuechar', '-')\n commentChar = kwargs.get('commentchar', '!')\n #\n iterable = ( line.strip() for line in iterable )\n tmp = []\n for line in iterable:\n if line.split(commentChar)[0].endswith(continueChar):\n tmp.append(line[:-1])\n else:\n if tmp:\n tmp.append(line)\n yield ' '.join(tmp)\n tmp = []\n else:\n yield line\n # flush\n if tmp:\n yield ' '.join(tmp)", "def ll(L1, L2):\n if not all(isinstance(L, Line) 
for L in (L1, L2)):\n raise TypeError('ll() expects two lines')\n return L1.normal_vector() ** L2.normal_vector() == 0", "def is_mlcmt(line,mlcmto,mlcmtc):\n \n return [line.find(mlcmto),line.find(mlcmtc)]", "def _l_(L1, L2):\n if not all(isinstance(L, Line) for L in (L1, L2)):\n raise TypeError('_l_() expects two lines')\n return L1.normal_vec() * L2.normal_vec() == 0", "def paragraphify(text: str) -> str:\n text = text and text.replace('\\r', '').strip('\\n')\n\n if not text:\n return ''\n\n return ''.join(f'<p>{p}</p>' for p in (\n p.replace('\\n', '<br>') for p in _multiple_newlines.split(text)\n ))", "def parseLines(text):\n lines = text.strip().split('\\n')\n esclines = []\n esc = False\n for l in lines:\n if esc:\n esclines[-1] = esclines[-1]+l\n else:\n esclines.append(l)\n if len(l)>0 and l[-1]=='\\\\':\n esclines[-1] = esclines[-1][:-1]\n esc = True\n else:\n esc = False\n return esclines", "def createPoem(self, nsyl=10, lstan=4, nstan=6, rhyme=True):\n if nsyl < 5:\n return 'nsyl must be >= 5'\n # more then one sentence section per verse\n stanzas = []\n for i in range(nstan):\n stanza = ''\n for j in (0, 1):\n nsyl0 = nsyl1 = nsyl//2\n if nsyl%2 == 0:\n nsyl1 -= 1\n s = self.createPhrase(nsyl0)\n v = self.createPhrase(nsyl1, vowel=True)\n o = self.createPhrase(nsyl1, vowel=True)\n p = n.random.choice(self.prepositions)\n while len(self._getSyllables(p)) >= nsyl1:\n p = n.random.choice(self.prepositions)\n if o[-1] in self.vowels and p[0] in self.vowels:\n elision = 1\n else:\n elision = 0\n nsyl_ = nsyl - (nsyl1 + 1) - len(self._getSyllables(p)) + elision\n p_ = self.createPhrase(nsyl_, vowel=p[-1] in self.vowels)\n stanza += s + ' li ' + v\n stanza += '\\ne ' + o + (' %s %s\\n' % (p, p_))\n stanzas.append(stanza[:-1])\n poem = \"\\n\\n\".join(stanzas)\n return poem", "def add_paragraph_marks(text, keep_line_endings=True, maxlength=72):\n\n # add # after line that ends with full stop, question and exclamation marks:\n ptrn = r\"([.؟!] 
*[\\r\\n]+(?:PageV\\w{2}P\\d+[abAB]?[\\r\\n]+)?)([^\\r\\n#P\\Z])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n\n # add # after section titles (but not before page numbers and sub-titles)\n ptrn = r\"(### .+[\\r\\n]+(?:PageV\\w{2}P\\d+[\\r\\n]+)?)([^\\r\\n#P\\Z])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n\n if keep_line_endings:\n # add the tildas for continued lines:\n new_text = \"\"\n for line in re.split(r\"([\\r\\n]+)\", text):\n if not line.startswith((\"P\", \"#\", \"~~\")) \\\n and not re.match(r\"[\\r\\n]+\", line):\n line = \"~~\"+line\n new_text += line\n else:\n # move page number to the previous line:\n ptrn = r\"([^ \\r\\n.؟!]) *[\\r\\n]+(PageV[^P]+P[\\w]+) *[\\r\\n]+\"\n text = re.sub(ptrn, r\"\\1 \\2 \", text)\n # Add paragraph signs before every new line:\n ptrn = r\"([\\r\\n]+)([^\\r\\n#P\\s])\"\n text = re.sub(ptrn, r\"\\1# \\2\", text)\n # break long lines into shorter lines:\n new_text = wrap(text, maxlength)\n\n new_text = re.sub(\"~~#\", \"#\", new_text)\n new_text = re.sub(r\"~~([^\\n]+%~%)\", r\"# \\1\", new_text)\n new_text = re.sub(r\"~~\\.\\./\", \"../\", new_text)\n\n return new_text", "def split_text(text: str) -> List[Dict[str, str]]:\n # split into paragraphs\n lines = text.splitlines()\n groups = common.group_list(lines, lambda a, _: a.strip() == '')\n paras = ['\\n'.join(item) for empty_line, item in groups if not empty_line]\n\n def _fallback(p, type):\n logging.warn(f'Wrong {type} format:\\n' + p)\n cells.append({'type': 'text', 'source': p})\n\n cells = []\n for p in paras:\n lines = p.splitlines() + ['']\n p += '\\n'\n if p.startswith('#'):\n # parse title\n if not _is_mark(lines[1:]):\n _fallback(p, 'title')\n else:\n m = re.match(r'#+ *', lines[0])\n cells.append({\n 'type': 'title',\n 'prefix': m[0],\n 'source': lines[0][m.span()[1]:],\n 'mark': '\\n'.join(lines[1:])})\n elif p.startswith('$$'):\n # parse equations\n m = re.findall(r'\\$\\$', p)\n if len(m) != 2:\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'equation', 'source': p})\n elif p.startswith('!['):\n # parse images\n if not lines[0].strip().endswith(')') or not _is_mark(lines[1:]):\n _fallback(p, 'image')\n else:\n cells.append({'type': 'image', 'source': p})\n elif p.startswith('|'):\n # parse table\n for i, l in enumerate(lines):\n if not l.startswith('|'):\n break\n if not _is_mark(lines[i:]):\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'table', 'source': p})\n else:\n groups = common.group_list(lines, _list)\n for prefix, item in groups:\n if len(prefix.split('__')) == 2:\n prefix = prefix.split('__')[0]\n source = '\\n'.join(item)[len(prefix):]\n if prefix == '':\n cells.append({'type': 'text', 'source': source})\n else:\n cells.append({\n 'type': 'list',\n 'prefix': prefix,\n 'source': source})\n return cells", "def _split_multiline_prompt(\n get_prompt_text: _StyleAndTextTuplesCallable,\n) -> tuple[\n Callable[[], bool], _StyleAndTextTuplesCallable, _StyleAndTextTuplesCallable\n]:\n\n def has_before_fragments() -> bool:\n for fragment, char, *_ in get_prompt_text():\n if \"\\n\" in char:\n return True\n return False\n\n def before() -> StyleAndTextTuples:\n result: StyleAndTextTuples = []\n found_nl = False\n for fragment, char, *_ in reversed(explode_text_fragments(get_prompt_text())):\n if found_nl:\n result.insert(0, (fragment, char))\n elif char == \"\\n\":\n found_nl = True\n return result\n\n def first_input_line() -> StyleAndTextTuples:\n result: StyleAndTextTuples = []\n for fragment, char, *_ in 
reversed(explode_text_fragments(get_prompt_text())):\n if char == \"\\n\":\n break\n else:\n result.insert(0, (fragment, char))\n return result\n\n return has_before_fragments, before, first_input_line", "def linebreaks(text):\n return smart_unicode(text).replace('\\n', '<br />')", "def test_no_persist_not_terminated_multiline_blocked_region(self, style):\n result = self._spellcheck_lint(\"{s}{e}\\n\"\n \"{s} ```splelling\\n\"\n \"{m} eror {e}\\n\"\n \"technical_term\",\n style)\n self.assertTrue(result)", "def process_output(self, text):\n new_lines = []\n changed = False\n for line_processor in self.line_processors:\n line_processor.reset()\n\n for line in text.splitlines():\n # -- LINE PROCESSING PIPELINE:\n original_line = line\n for line_processor in self.line_processors:\n line = line_processor(line)\n\n if line != original_line:\n changed = True\n new_lines.append(line)\n\n if changed:\n text = \"\\n\".join(new_lines) + \"\\n\"\n return changed, text", "def __CompareText(self, s1, s2):\n # The \"splitlines\" method works independently of the line ending\n # convention in use.\n return s1.splitlines() == s2.splitlines()", "def test_no_persist_multiline_blocked_region(self, style):\n result = self._spellcheck_lint(\"{s}{e}\\n\"\n \"{s} ```splelling\\n\"\n \"{m} eror``` {e}\\n\"\n \"technical_term\",\n style)\n self.assertTrue(result)", "def check_paragraph(line):\n if len(line) > 3 and line[:3] == '⋅⋅⋅':\n return '<p>' + line[3:] + '</p>'\n else:\n return line", "def _detect_bullets(self, item):\n if item[\"text\"] and item[\"text\"][0]: \n matches = re.findall(r'[a-z]', item[\"text\"][0].lower())\n if len(matches) == 0: \n return True \n return False", "def line_valid(line: str) -> bool:\n\n return line != ' ' and line != ''", "def detect_newline(string: str) -> str:\n first_lf_pos = string.find(\"\\n\")\n if first_lf_pos > 0 and string[first_lf_pos - 1] == \"\\r\":\n return \"\\r\\n\"\n return \"\\n\"", "def lInLg(text):\n while re.search(r'(<lg>)(((?!</lg>).)*?)<(/?)p>', text, flags=re.DOTALL|re.IGNORECASE) is not None:\n text = re.sub(r'(<lg>)(((?!</lg>).)*?)<(/?)p>', r'\\1\\2<\\4l>', text, flags=re.DOTALL|re.IGNORECASE)\n return text", "def shouldTheLineBeIgnored(self,line):\n global multi_comment_line_mode\n if multi_comment_line_mode:\n if line.find(\"*/\") != -1:\n # we found the ending line\n multi_comment_line_mode = False\n return False,line[line.find(\"*/\")+2:]+'$endOfMultiLine'\n else:\n # still searching for the end of the comment\n return True,''\n if line == '\\n':\n # in case it's a clean line\n return True,''\n if line == \"\":\n return True,''\n if line[0:2] == \"//\":\n return True,''\n if line[0] == \"/\" and (line[1:3] == '**' or line[1:2] == '*'):\n # it's a multi line comment case\n if line[3:].find(\"*/\") != -1:\n # in this case the multi line comment ends here. 
we will return the rest of the line\n index_for_the_rest_of_line = line[3:].find(\"*\")+5 # starting after the - 2 for */ and 3 for the real\n # index\n if index_for_the_rest_of_line == len(line)-1:\n return True,'' #in this case we can ignore\n return False,line[index_for_the_rest_of_line:] #returnning the rest\n else:\n multi_comment_line_mode = True\n return True,''\n else:\n return False,'' # it's not the kind of line we want to ignore", "def VerifyStructure(self, parser_mediator, lines):\n match_generator = self._LINE_GRAMMAR.scanString(lines, maxMatches=1)\n return bool(list(match_generator))", "def format_lines(unprocessed_text: str) -> List[List[str]]:\n stored_lines: List[List[str]] = []\n new_line: List = []\n new_word: str = \"\"\n for char in unprocessed_text:\n if char != \"\\n\":\n if char != \" \" and char.isalpha():\n new_word += char\n else:\n new_line.append(new_word)\n new_word = \"\"\n else:\n stored_lines.append(new_line)\n new_line = []\n return stored_lines", "def label_consecutive_lines():\n offset = 0.1\n\n def get_points():\n \"\"\"Prompts for a point triple. Returns a list of the points:\n [<iter>, ...]\n \"\"\"\n points = rs.GetPoints(\n draw_lines=False, in_plane=False, \n message1='Select first tail', message2='Select heads', \n max_points=None, base_point=None)\n return points\n\n def draw_lpoint_triple(text, tail, head):\n \"\"\"Receives label text and a list of point triples:\n str\n [<iter>, ...]\n Draws text dots with <text>-a, -b, -c\n \"\"\"\n line_vector = rs.PointSubtract(head, tail)\n offset_vector = line_vector * offset\n offset_tail = rs.VectorAdd(tail, offset_vector)\n offset_head = rs.VectorSubtract(head, offset_vector)\n axis = [0, 0, 1]\n angle = 90\n rotated_offset_vector = rs.VectorRotate(offset_vector, angle, axis)\n offset_side = rs.VectorAdd(offset_tail, rotated_offset_vector)\n rs.AddTextDot(('%s-a' % text), offset_tail)\n rs.AddTextDot(('%s-b' % text), offset_head)\n rs.AddTextDot(('%s-c' % text), offset_side)\n\n def side_is_same_as_rule(point):\n \"\"\"Receives a point (i.e., a list):\n [num, num, num]\n Returns whether the point is on the same side as the side label in the\n rule\n \"\"\"\n return False\n \n points = get_points()\n text = rs.StringBox('Enter label text')\n for i in range(len(points) - 1):\n # for point in points:\n tail = points[i]\n head = points[i + 1]\n draw_lpoint_triple(text, tail, head)", "def Split_to_Lines(self):\r\n\r\n line = []\r\n word = \"\"\r\n comment = False\r\n String = False\r\n for i in range(0, len(self.Code)):\r\n if self.Code[i] == '\\n':\r\n if word != '':\r\n if (String is True) and (word[0] != word[len(word) - 1]):\r\n return False\r\n line.append(word)\r\n if len(line) != 0:\r\n self.Code_Lines.append(line)\r\n if len(line) >= 2:\r\n if line[0] == \"end\":\r\n break\r\n word = \"\"\r\n line = []\r\n comment = False\r\n String = False\r\n elif not comment:\r\n if self.Code[i] == ' ':\r\n if not String:\r\n if word != \"\" and word != '':\r\n line.append(str(word))\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n else:\r\n if self.Code[i] == '\"':\r\n if not String:\r\n if word != \"\":\r\n if word != '':\r\n line.append(word)\r\n word = '\"'\r\n String = True\r\n elif word[0] == self.Code[i]:\r\n String = False\r\n word += self.Code[i]\r\n if word != '':\r\n line.append(word)\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n elif self.Code[i] == '\\'':\r\n if not String:\r\n if word != \"\":\r\n if word != '':\r\n line.append(word)\r\n word = '\\''\r\n String = True\r\n elif word[0] 
== self.Code[i]:\r\n String = False\r\n word += self.Code[i]\r\n if word != '':\r\n line.append(word)\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n else:\r\n if String:\r\n word += self.Code[i]\r\n else:\r\n if self.Code[i] == ';':\r\n comment = True\r\n\r\n elif self.Code[i] in self.Special_Symbols:\r\n if word != '':\r\n line.append(word)\r\n line.append(self.Code[i])\r\n word = \"\"\r\n else:\r\n line.append(self.Code[i])\r\n\r\n else:\r\n word += self.Code[i].lower()\r\n\r\n return self.Code_Lines", "def lines(text):\n return [l.strip() for l in text.strip().splitlines() if l.strip()]", "def test_newlinesBeforeLineBreaking(self):\n # Because MAX_COMMAND_LENGTH includes framing characters, this long\n # line is slightly longer than half the permissible message size.\n longline = \"o\" * (irc.MAX_COMMAND_LENGTH // 2)\n\n self.client.msg(\"foo\", longline + \"\\n\" + longline)\n self.assertEqual(\n self.client.lines, [\"PRIVMSG foo :\" + longline, \"PRIVMSG foo :\" + longline]\n )", "def process_line(self, line):\n ltype = self.line_type(line)\n if ltype == 'gene':\n self.process_gene_line(line)\n return True\n elif ltype == 'mRNA':\n self.process_mrna_line(line)\n return True\n elif ltype == 'CDS':\n self.process_cds_line(line)\n return True\n elif ltype == 'exon':\n self.process_exon_line(line)\n return True\n elif ltype == 'start_codon' or ltype == 'stop_codon':\n self.process_other_feature_line(line)\n return True\n else:\n self.skipped_features += 1\n return False", "def test_file_readlines(self):\n FileWriter(self.multiline_path).write(self.multiline_string)\n line_list = FileReader(self.multiline_path).readlines()\n self.assertEqual(line_list, self.multiline_list)", "def test_two_footnotes(self):\n text = \"Footnote[^1]\\n\\n[^1]: Footnote text\"\n self.assertNotEqual(self.md(text), self.md(text))", "def paragraphlines_filter(value, autoescape=None):\n autoescape = autoescape and not isinstance(value, SafeData)\n return mark_safe(linebreaks(value, autoescape))", "def _is_clustal_seq_line(line):\n return line and (not line[0].isspace()) and\\\n (not line.startswith('CLUSTAL')) and (not line.startswith('MUSCLE'))", "def is_skippable(line: str) -> bool:\n return len(line) == 0 or line[0] == ';'", "def hasNextLine(self) -> bool:\n raise NotImplementedError", "def filter_line(line:str) -> bool:\n fails = is_short_sentence(line, MIN_LINE_LENGTH)\n\n return not fails", "def isSpam(textLine):\n\treturn True", "def lemmatize_text(text):\n text = nlp(text)\n text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])\n return text", "def is_horizontal(line:tuple)->bool:\n return line[0][1] == line[1][1]", "def __long__line_case(content: list, line: str) -> bool:\n if len(line) > 25:\n if line[25].isdigit():\n content.append(line[26:].strip())\n return True\n return False", "def _get_multiline(self):\n lines = []\n line = self._get_line()\n lines.append(line)\n if line[3:4] == \"-\":\n code = line[:3]\n while 1:\n nextline = self._get_line()\n lines.append(nextline)\n if nextline[:3] == code and nextline[3:4] != \"-\":\n break\n return lines", "def is_cmt(line,cmt):\n\n if len(line)==1:\n return False\n else:\n for i in range(len(line)):\n if line[i]!=' ' and line[i]!='\\t':\n if len(line[i:])>len(cmt):\n if line[i:i+len(cmt)]==cmt:\n return True\n else:\n break\n return False", "def textile_restricted(text, lite=True, noimage=True, html_type='xhtml',\n auto_link=False):\n return Textile(restricted=True, lite=lite,\n noimage=noimage, 
auto_link=auto_link).textile(\n text, rel='nofollow', html_type=html_type)", "def _split_loop_and_tag_text(text_list):\n\n # Split into separate lines and put into a\n # pandas dataframe so data selection is easier\n text_list = text_list.split('\\n')\n text_df = pd.DataFrame(text_list, columns=[u'text'])\n \n # Strip the whitespace off the beginning and end\n text_df[u'text'] = text_df.text.str.strip()\n \n # This column determines if text contains a _tag\n text_df[u'tag'] = False\n text_df.ix[text_df.text.str.contains('_\\w+',regex=True),u'tag'] = True\n\n # This column determines if text contains a loop tag (loop_)\n text_df[u'loop'] = False\n text_df.ix[text_df.text.str.contains('loop_'),u'loop'] = True\n \n # The line below the tag is always part of the tag group\n text_df.ix[text_df.ix[text_df.loop].index+1, u'loop'] = True\n\n # Any row that doesn't contain a tag and is not currently part of a loop\n # must also be part of a loop\n text_df.ix[(text_df.tag==False)&(text_df.loop!=True),u'loop'] = True\n \n # Separate the data that is not part of a loop\n text_df_tags = text_df.ix[text_df.loop==False, u'text'].tolist()\n \n # Separate the data that is part of a loop\n text_df_loop = text_df.ix[text_df.loop==True, u'text'].tolist()\n\n return text_df_loop, text_df_tags", "def lineage_test(self, rule, strip_negation=False):\n\n if 'match_leaf' in rule and rule['match_leaf']:\n # stupid trick to make a generator with one item\n lineage_obj = (self for _ in [None])\n lineage_depth = 1\n else:\n lineage_obj = self.lineage()\n lineage_depth = self.depth()\n\n if not len(rule['lineage']) == lineage_depth:\n return False\n\n matches = 0\n\n for lineage_rule, section in zip(rule['lineage'], lineage_obj):\n object_rules, text_match_rules = HConfigChild._explode_lineage_rule(\n lineage_rule)\n\n if not HConfigChild._lineage_eval_object_rules(\n object_rules, section):\n return False\n\n # This removes negations for each section but honestly,\n # we really only need to do this on the last one\n if strip_negation:\n if section.text.startswith('no '):\n text = section.text[3:]\n elif section.text.startswith('default '):\n text = section.text[8:]\n else:\n text = section.text\n else:\n text = section.text\n\n if HConfigChild._lineage_eval_text_match_rules(\n text_match_rules, text):\n matches += 1\n continue\n else:\n return False\n\n return matches == len(rule['lineage'])", "def list_wrapped_lines():\n for line in text.split('\\n'):\n if len(line) <= ncolumns:\n yield line\n else:\n while True:\n # try to wrap at a word-break\n last_word_break = re.search(r\"\\s+(?=\\S*$)\", line[:ncolumns])\n if last_word_break:\n yield line[:last_word_break.start()]\n line = line[last_word_break.end():].lstrip()\n else:\n yield line[:ncolumns]\n line = line[ncolumns:].lstrip()\n if len(line) == 0:\n break\n elif len(line) <= ncolumns:\n yield line\n break", "def test_line_endings(parallel, read_basic, read_commented_header, read_rdb):\n text = \"a b c\\n1 2 3\\n4 5 6\\n7 8 9\\n\"\n expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=(\"a\", \"b\", \"c\"))\n\n for newline in (\"\\r\\n\", \"\\r\"):\n table = read_basic(text.replace(\"\\n\", newline), parallel=parallel)\n assert_table_equal(table, expected)\n\n # Make sure the splitlines() method of FileString\n # works with CR/CR+LF line endings\n text = \"#\" + text\n for newline in (\"\\r\\n\", \"\\r\"):\n table = read_commented_header(text.replace(\"\\n\", newline), parallel=parallel)\n assert_table_equal(table, expected)\n\n expected = Table(\n 
[MaskedColumn([1, 4, 7]), [2, 5, 8], MaskedColumn([3, 6, 9])],\n names=(\"a\", \"b\", \"c\"),\n )\n expected[\"a\"][0] = np.ma.masked\n expected[\"c\"][0] = np.ma.masked\n text = \"a\\tb\\tc\\nN\\tN\\tN\\n\\t2\\t\\n4\\t5\\t6\\n7\\t8\\t9\\n\"\n for newline in (\"\\r\\n\", \"\\r\"):\n table = read_rdb(text.replace(\"\\n\", newline), parallel=parallel)\n assert_table_equal(table, expected)\n assert np.all(table == expected)", "def check_Lines(self):\n\n pass", "def isfixline(number):\n if number[0] == '(':\n return True\n return False", "def check_marked_paragraph(paragraph, number):\n\n\tq = 0 # счетчик найденных маркеров\n\tchars = '<> ' # возможные символы в каретке\n\n\tfor i in range(len(paragraph.runs)):\n\t\tif \"<>\" in paragraph.runs[i].text: # если в тексте каретки встречается маркер\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждый символ в каретке\n\t\t\t\tif c not in chars: # если он не входит в список разрешенных символов\n\t\t\t\t\treturn False\n\t\t\tq += 1 # если проверка пройдена, увеличиваем счетчик\n\t\telif \"<\" in paragraph.runs[i].text and \">\" in paragraph.runs[i+1].text: # если маркер разделен на две соседние каретки\n\t\t\tfor c in paragraph.runs[i].text: # проверяем каждую из кареток\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tfor c in paragraph.runs[i+1].text:\n\t\t\t\tif c not in chars:\n\t\t\t\t\treturn False\n\t\t\tq += 1\n\n\tif q != number: # если количество маркеров не совпало с указанным в выводе\n\t\treturn False\n\telse:\n\t\treturn True", "def inParensOrMultiline(line):\n parens = 0\n in_quotes_1 = 0\n in_quotes_2 = 0\n skip = False\n in_multiline = 0\n\n for i in accumulator:\n if skip:\n skip = False\n continue\n\n if i == 'R' and in_multiline == 0:\n in_multiline = 1\n continue\n\n if i == '\"' and in_multiline == 1:\n in_multiline = 2\n continue\n\n if i == '(' and in_multiline == 2:\n in_multiline = 3\n continue\n\n if in_multiline >= 3:\n if i == ')' and in_multiline == 3:\n in_multiline = 4\n continue\n\n if i == '\"' and in_multiline == 4:\n in_multiline = 0\n continue\n\n in_multiline = 3\n continue\n\n in_multiline = 0\n\n if i == '\\\\':\n skip = True\n\n if i == '\"' and not in_quotes_2:\n in_quotes_1 = (1 - in_quotes_1)\n\n if i == '\\'' and not in_quotes_1:\n in_quotes_2 = (1 - in_quotes_2)\n\n if i == '(' and not (in_quotes_1 or in_quotes_2):\n parens += 1\n\n if i == ')' and not (in_quotes_1 or in_quotes_2):\n parens -= 1\n\n return parens > 0 or in_multiline >= 3", "def test_sentencier_en_new_lines():\n sentencizer = Sentencizer()\n text = 'It is a sunny day!!!! When Andy comes back,\\n' \\\n 'we are going to the zoo.'\n crafted_chunk_list = sentencizer.segment(text, 0)\n assert len(crafted_chunk_list) == 3", "def process_bullets(self):\n parts = re.findall(r'^\\*([^\\*]*)', self.unixtext, re.M | re.DOTALL)\n bullets = []\n for part in parts:\n pos = part.find(\"\\n\\n\")\n if pos > 0:\n bullets.append(\" \".join(part[:pos].replace(u\"\\n\", \"\").split()))\n else:\n bullets.append(\" \".join(part.replace(u\"\\n\", \"\").split()))\n return bullets", "def test_with_big_lists(self):\n\n self.check_markdown(\n '''\n - List\n\n ??? 
note \"Details\"\n\n - Paragraph\n\n Paragraph\n\n - Paragraph\n\n paragraph\n ''',\n '''\n <ul>\n <li>\n <p>List</p>\n <details class=\"note\">\n <summary>Details</summary>\n <ul>\n <li>\n <p>Paragraph</p>\n <p>Paragraph</p>\n </li>\n <li>\n <p>Paragraph</p>\n <p>paragraph</p>\n </li>\n </ul>\n </details>\n </li>\n </ul>\n ''',\n True\n )", "def winNewLines(inString):\n return reUnixNewLine.sub('\\r\\n',inString)", "def is_nested(line):\n pass", "def split_by_lines(text, remove_empty=False):\n\tlines = text.splitlines()\n\t\n\treturn remove_empty and [line for line in lines if line.strip()] or lines", "def can_generate_ransom_note(self):\n if self.ransom_text == '' or self.ransom_text == ' ':\n return True\n ransom_text_words = self.ransom_text.split(' ')\n magazine_text_words = self.magazine_text.split(' ')\n # counting the occurrences of words in the ransom and magazine texts.\n ransom_count = self._count_words_in_string(ransom_text_words)\n magazine_count = self._count_words_in_string(magazine_text_words)\n result = False\n for i in ransom_text_words:\n # if magazine_count hashmap doesn't have word\n if magazine_count.get(i) is None:\n result = False\n break\n # if ransom_count hashmap have less word occurances than magazine count.\n if ransom_count.get(i) <= magazine_count.get(i):\n result = True\n else:\n result = False\n break\n return result", "def nonempty_lines(text):\n return [line for line in text.split('\\n') if line]", "def chunk_in_text(chunk, text):\n chunk = clean_chunk(chunk)\n return text.find(chunk) >= 0", "def split_txt_multiline(data: str) -> list[str]:\n limit = 255\n\n items = []\n data2 = data\n while len(data2) > limit:\n items.append(f'\"{data2[:limit]}\"')\n data2 = data2[limit:]\n items.append(f'\"{data2}\"')\n\n return items", "def iter_logical_lines(cls, blob):\r\n indent_stack = []\r\n contents = []\r\n line_number_start = None\r\n\r\n def translate_logical_line(start, end, contents, endmarker=False):\r\n while contents[0] == '\\n':\r\n start += 1\r\n contents.pop(0)\r\n while contents[-1] == '\\n':\r\n end -= 1\r\n contents.pop()\r\n indent = len(indent_stack[-1]) if indent_stack else 0\r\n if endmarker:\r\n indent = len(contents[0])\r\n return (start, end + 1, indent)\r\n\r\n for token in cls.iter_tokens(blob):\r\n token_type, token_text, token_start = token[0:3]\r\n if token_type == tokenize.INDENT:\r\n indent_stack.append(token_text)\r\n if token_type == tokenize.DEDENT:\r\n indent_stack.pop()\r\n if token_type in cls.SKIP_TOKENS:\r\n continue\r\n contents.append(token_text)\r\n if line_number_start is None:\r\n line_number_start = token_start[0]\r\n elif token_type in (tokenize.NEWLINE, tokenize.ENDMARKER):\r\n yield translate_logical_line(\r\n line_number_start,\r\n token_start[0] + (1 if token_type is tokenize.NEWLINE else -1),\r\n list(filter(None, contents)),\r\n endmarker=token_type == tokenize.ENDMARKER)\r\n contents = []\r\n line_number_start = None", "def lines2breaks(lines, delimiter=\"\\n\", number_lines=False):\n if isinstance(lines, str):\n lines = lines.split(delimiter)\n if not number_lines:\n lines = [\"%s\" % line for line in lines]\n output = \"<pre>%s</pre>\" % \"\".join(lines)\n else:\n lines = [\"<li>%s</li>\" % line for line in lines]\n output = \"<ol><pre>\" + \"\".join(lines) + \"</pre></ol>\"\n return output", "def _split_line( self, data_list, line_num, text ):\n\t\t# if blank line or context separator, just add it to the output list\n\t\tif not line_num:\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# if 
line text doesn't need wrapping, just add it to the output list\n\t\tsize = len( text )\n\t\tmax_len = self._wrapcolumn\n\t\tif ( size <= max_len ) or ( ( size - ( text.count( '\\0' ) * 3 ) ) <= max_len ):\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# scan text looking for the wrap point, keeping track if the wrap\n\t\t# point is inside markers\n\t\ti = 0\n\t\tn = 0\n\t\tmark = ''\n\t\twhile n < max_len and i < size:\n\t\t\tif text[i] == '\\0':\n\t\t\t\ti += 1\n\t\t\t\tmark = text[i]\n\t\t\t\ti += 1\n\t\t\telif text[i] == '\\1':\n\t\t\t\ti += 1\n\t\t\t\tmark = ''\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t\t\tn += 1\n\n\t\t# wrap point is inside text, break it up into separate lines\n\t\tline1 = text[:i]\n\t\tline2 = text[i:]\n\n\t\t# if wrap point is inside markers, place end marker at end of first\n\t\t# line and start marker at beginning of second line because each\n\t\t# line will have its own table tag markup around it.\n\t\tif mark:\n\t\t\tline1 += '\\1'\n\t\t\tline2 = '\\0' + mark + line2\n\n\t\t# tack on first line onto the output list\n\t\tdata_list.append( ( line_num, line1 ) )\n\n\t\t# use this routine again to wrap the remaining text\n\t\tself._split_line( data_list, '>', line2 )", "def lemmatize(text, nlp):\n\n return [word.lemma_ for word in nlp(text)]", "def nostatement(phrase):\n\n is_printable = lambda x: x in printable\n is_whitespace = lambda x: x in whitespace\n if not any([is_printable(x) and not is_whitespace(x) for x in phrase]):\n return 'Fine. Be that way!'", "def paragraphs_from_lines(lines, splitchar='\\n', break_on_num_newlines=2, force_unix_linebreaks=True):\n if splitchar:\n if force_unix_linebreaks:\n lines = linebreaks_win2unix(lines)\n\n lines = lines.split(splitchar)\n else:\n if type(lines) not in (tuple, list):\n raise ValueError(\"`lines` must be passed as list or tuple if `splitchar` evaluates to False\")\n\n n_lines = len(lines)\n paragraphs = []\n n_emptylines = 0\n cur_par = ''\n # iterate through all lines\n for i, l in enumerate(lines):\n if not splitchar and force_unix_linebreaks:\n l = linebreaks_win2unix(l)\n\n if l.strip():\n if not cur_par:\n cur_par = l\n else:\n cur_par += ' ' + l\n n_emptylines = 0\n else:\n n_emptylines += 1\n\n if (n_emptylines >= break_on_num_newlines-1 or i == n_lines-1) and cur_par:\n paragraphs.append(cur_par)\n cur_par = ''\n n_emptylines = 0\n\n return paragraphs", "def test_text_line_mode(self):\n outfile = cStringIO.StringIO()\n var_order = [2,1,0]\n\n \n # Write out in the order 2, 1, 0. 
In a normal program those constants\n # would come from an enum indicating the order in which the fields\n # appear in schema\n aggregator = lra.LineRawHandleAggregator(outfile, var_order = var_order)\n\n aggregator.map({0: 'world', 1: 'there', 2: 'hello'})\n aggregator.map({0: 'good', 1: 'is', 2: 'this'})\n\n self.assertEqual(outfile.getvalue(),\n 'INSERT\\nhello\\nthere\\nworld\\nENDINSERT\\n'\n 'INSERT\\nthis\\nis\\ngood\\nENDINSERT\\n')", "def check(text):\n text = text.copy()\n if not isinstance(text,list): # TEST\n raise TypeError(\"text must be a listlike :\\n{}\".format(text))\n \n # managing latex genuine tag\n for i, line in enumerate(text):\n if '\\\\' in line:\n utils.underlineall(line,'\\\\')\n logger.warning(\"Genuine latex tags were found, but won't be evaluated on line {}\".format(i))\n \n # check placeholders # TEST\n parsers['v'].check_syntax(text)\n \n for i,line in enumerate(text):\n # checking ends of lines TEST\n space_before_match = re.search(\"[^ ],,\",line)\n if space_before_match:\n utils.underlineall(line,space_before_match.group())\n raise SyntaxError(\"Please put a space before EOL tag in line {}\".format(i))\n space_after_match = re.search(\",,[^ ]\",line)\n if space_after_match:\n utils.underlineall(line,space_after_match.group())\n raise SyntaxError(\"Please put a space or a carriage return after EOL tag in line {}\".format(i))\n \n # checking illegal closing tags TEST\n for parser, module in parsers.items():\n if not module.has_closing_tag:\n if closing_mark + parser in line:\n utils.underlineall(line,closing_mark+parser)\n raise SyntaxError(\"{} parser has no closing tag: check line {}\".format(parser,i))\n \n # checking other tags\n if opening_mark in line:\n fline,nothing, sline = line.partition(opening_mark)\n while True:\n # checking each sub parser\n mark_to_test = sline.split()[0]\n parser = parsers[mark_to_test[0]]\n checker.checkmark(mark_to_test,parser,line,i)\n checker.checkargs(parser,mark_to_test,sline,line,i)\n \n # checking closing tag TEST BUG\n if parser.has_closing_tag:\n closing_tag = closing_mark + mark_to_test\n opening_tag = opening_mark + mark_to_test\n if opening_tag in sline:\n utils.underlineall(sline,opening_tag)\n raise SyntaxError(\"{} opening tag has been found before closing tag expected on line {}\".format(opening_tag,i))\n if closing_tag in sline:\n part1,tag,part2 = sline.partition(closing_tag)\n sline = part1 + part2\n else: # looking for closing tag in the rest of the text\n for j,line2 in enumerate(text[i+1:]):\n j+=i+1\n fline2, mark_expected, sline2 = line2.partition(closing_tag)\n if opening_tag in fline2:\n print(\"Opening tag not closed, line {}\".format(i))\n print(fline,nothing,utils.underlineall(sline,opening_tag,False))\n print(\"Opening tag found too soon, line {}\".format(j))\n utils.underlineall(line2,opening_tag)\n raise SyntaxError(\"{} opening tag has been found before closing tag expected\".format(opening_tag))\n if mark_expected:\n text[j] = fline2 + sline2\n break\n else:\n print(fline,nothing,utils.underlineall(sline,opening_tag,False))\n raise SyntaxError(\"No closing tag found for {} in line {}\".format(opening_tag,i))\n new_partition = sline.partition(opening_mark)\n fline = fline + nothing + new_partition[0]\n nothing, sline = new_partition[1:]\n \n if opening_mark not in sline: # condition to break loop\n line = fline + nothing + sline\n break\n \n # checking alone closing tags -> closing tags are supposed to be deleted TEST\n if closing_mark in line: \n alone_closing_tag = 
utils.wrappedchars(line,closing_mark)\n utils.underlineall(line,alone_closing_tag)\n raise SyntaxError(\"An only closing tag has been found in line {}\".format(i))\n \n return True", "def parse_and_check_reactants(raw_text_line: str) -> List[str]:\n smiles = raw_text_line.strip().replace(' ', '')\n out = []\n for s in smiles.split('.'):\n mol = Chem.MolFromSmiles(s, sanitize=False)\n if mol is None:\n print(smiles)\n raise ValueError\n out.append(s)\n return out", "def isbatch(line):\n return line and (\n line.strip()[:3] == \"BHS\"\n or (line.count(\"MSH\") > 1 and line.strip()[:3] != \"FHS\")\n )", "def hasRawText(self, text):\n r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|dl|pre|h\\d)[^>]*?>.*</\\1>',\n re.S).sub('', text.strip()).strip()\n r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)\n return '' != r", "def test_with_2_lines():\n line = \"n\" * 15 + \"\\n\" + \"n\" * 60 + \" \" + \"n\" * 10\n assert wrap_line(line) == \"n\" * 15 + \" \" + \"n\" * 60 + \"\\n\" + \"n\" * 10", "def is_markdown_cell(cell):\n return cell[\"cell_type\"] == \"markdown\"", "def _isLine(self):\n return (self.width == 0 and self.height > 1) or (self.height == 0 and self.width > 1)", "def _isLine(self):\n return (self.width == 0 and self.height > 1) or (self.height == 0 and self.width > 1)", "def drawMirroredSentence(self, braille_code, x=Config.MARGIN, y=Config.MARGIN,\n wrap_width=(Config.PAPER_WIDTH - (Config.MARGIN * 2)),\n x_spacing=Config.X_SPACING,\n y_spacing=Config.Y_SPACING,\n color=Config.FONT_COLOR):\n\n dx, dy = 0, 0\n character_width = Config.FONT_SIZE * 3 + x_spacing * 2\n character_height = Config.FONT_SIZE * 5 + y_spacing\n\n # Displaying the letters\n for n in range(len(braille_code)):\n\n character = braille_code[n]\n self.drawChar(character, x + dx, y + dy, color)\n\n if dx + character_width >= wrap_width: # If it has hit the right margin, wrap\n dx = 0\n dy += character_height\n else:\n dx += character_width # Move to next char\n\n if dy + character_height >= Config.PAPER_HEIGHT - Config.MARGIN * 2: # If it hits the end of the page\n # Make a new Paper object, have it draw remaining chars\n next_page = Paper(self.name, (self._page + 1), self.charset)\n next_page.drawMirroredSentence(braille_code[n:], x, y, wrap_width, x_spacing, y_spacing, color)\n break\n\n self.mirror()\n self.save()" ]
[ "0.7332721", "0.7118218", "0.6140549", "0.58115304", "0.5777548", "0.5629705", "0.5620639", "0.5535499", "0.5423312", "0.5394802", "0.5378505", "0.53308886", "0.5302663", "0.5278468", "0.52720255", "0.5223611", "0.5223611", "0.51562977", "0.51394373", "0.5101829", "0.50781065", "0.50735366", "0.5072135", "0.5064861", "0.5036632", "0.5016328", "0.5015158", "0.50030005", "0.49849412", "0.49588516", "0.49533102", "0.4946555", "0.49360365", "0.4912046", "0.48825857", "0.48741254", "0.48651648", "0.4863243", "0.486008", "0.4855046", "0.48363706", "0.48294747", "0.48187727", "0.48128045", "0.48127413", "0.48062944", "0.48051795", "0.4797154", "0.47928038", "0.47907835", "0.4783701", "0.4773568", "0.47707987", "0.4761893", "0.4760242", "0.47582078", "0.474956", "0.47422186", "0.47337216", "0.47239554", "0.47233567", "0.4723212", "0.47150907", "0.47092232", "0.47090566", "0.4702784", "0.47020108", "0.47012442", "0.47006586", "0.4696878", "0.46958694", "0.46940348", "0.46771005", "0.46761408", "0.46759897", "0.4675042", "0.46749946", "0.4674955", "0.46742222", "0.4671258", "0.46689895", "0.46672675", "0.46637255", "0.4662818", "0.46588197", "0.4657197", "0.46474463", "0.46353084", "0.46352148", "0.4632772", "0.4631995", "0.4630774", "0.46297437", "0.46271876", "0.46252364", "0.4619531", "0.4619446", "0.4618822", "0.4618822", "0.4617933" ]
0.7593807
0
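As a quick usage illustration of the limerick record above: the LimerickDetector class name below is a hypothetical wrapper for the is_limerick method shown in the document, and the poem is Edward Lear's well-known limerick, which follows the AABBA scheme.

# Hypothetical usage sketch; LimerickDetector is an assumed wrapper class
# exposing the is_limerick / rhymes / num_syllables methods discussed above.
poem = "\n".join([
    "There was an Old Man with a beard,",
    "Who said, It is just as I feared!",
    "Two Owls and a Hen,",
    "Four Larks and a Wren,",
    "Have all built their nests in my beard!",
])
detector = LimerickDetector()
print(detector.is_limerick(poem))       # should pass the AABBA checks: beard/feared/beard rhyme, Hen/Wren rhyme
print(detector.is_limerick("one line")) # False: a limerick needs exactly five non-empty lines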
Calculates the sky background temperature for a given Galactic longitude (gl), Galactic latitude (gb), and frequency (freq, in MHz). Coordinates are in degrees. A spectral index of "index" is assumed (default 2.55). The return value is in K. If a frequency array 'freqs' is given, then Tsky is calculated for each frequency in the array, and the returned value is a list of Tsky values.
def tsky(gl, gb, freq, index, freqs=None): # reading the table nsky=np.zeros((90, 180), dtype=float) for ii in xrange(90): for jj in xrange(180): pos=(ii*180+jj)*5 nsky[ii,jj]=float(haslam_table[pos:pos+5]) # Convert to standard l,b b = int(gb + 90.5) if b >= 180: b = 179 l = int(gl + 0.5) if gl >= 360: l = 0 l = int((l / 4)) if freqs == None: tsky = 2.7 + nsky[l,b] * (freq/408.0)**(index) return tsky else: temps=[] for freq in freqs: tsky = 2.7 + nsky[l,b] * (freq/408.0)**(index) temps.append(tsky) return temps
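The core of the positive document above is a single power-law step: the Haslam 408 MHz brightness for the selected (l, b) pixel is scaled to the target frequency and the ~2.7 K CMB floor is added. A condensed, vectorized sketch of that step follows; scale_haslam is an illustrative name, it assumes the pixel value nsky[l, b] has already been looked up, and the negative default spectral index is an assumption (the original applies "index" with whatever sign the caller passes).

import numpy as np

def scale_haslam(nsky_lb, freqs_mhz, index=-2.55):
    """Scale a Haslam 408 MHz pixel temperature to other frequencies.

    nsky_lb   : brightness temperature at 408 MHz for one (l, b) pixel, in K
    freqs_mhz : frequency or array of frequencies in MHz
    index     : spectral index, applied as (f/408)**index exactly as in the
                document above; the negative default is an assumption here
    Returns Tsky in K, including the ~2.7 K CMB contribution.
    """
    freqs_mhz = np.asarray(freqs_mhz, dtype=float)
    return 2.7 + nsky_lb * (freqs_mhz / 408.0) ** index

# e.g. scale_haslam(25.0, [400.0, 800.0, 1400.0]) gives one Tsky per frequency,
# replacing the explicit per-frequency loop in the original function.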
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tsky_range(gl, gb, f1, f2, index, freqs=None):\n\n\t# reading the table\n\tnsky=np.zeros((90, 180), dtype=float)\n\tfor ii in xrange(90):\n\t\tfor jj in xrange(180):\n\t\t\tpos=(ii*180+jj)*5\n\t\t\tnsky[ii,jj]=float(haslam_table[pos:pos+5])\n\n\t# Convert to standard l,b\n\tb = int(gb + 90.5)\n\tif b >= 180: b = 179\n\tl = int(gl + 0.5)\n\tif gl >= 360: l = 0\n\tl = int((l / 4))\n\t\n\tif freqs == None:\n\t\ttot=0\n\t\tfor ii in xrange(101):\n\t\t\tfreq = f1 + ii*(f2-f1)/100.\n\t\t\ttsky = 2.7 + nsky[l,b] * (freq/408.0)**(index)\n\t\t\ttot += tsky\n\t\ttot /= 100.\n\t\treturn tot\n\telse:\n\t\ttemps=[]\n\t\tfor ff in xrange(1, len(freqs)):\n\t\t\ttot = 0\n\t\t\tfor ii in xrange(101):\n\t\t\t\tfreq = freqs[ff-1] + ii*(freqs[ff]-freqs[ff-1])/100.\n\t\t\t\ttsky = 2.7 + nsky[l,b] * (freq/408.0)**(index)\n\t\t\t\ttot += tsky\n\t\t\ttot /= 100.\n\t\t\ttemps.append(tot)\n\t\treturn temps", "def tskypy(self, psr):\n # ensure l is in range 0 -> 360\n b = psr.gb\n if psr.gl < 0.:\n l = 360 + psr.gl\n else:\n l = psr.gl\n\n # convert from l and b to list indices\n j = b + 90.5\n if j > 179:\n j = 179\n\n nl = l - 0.5\n if l < 0.5:\n nl = 359\n i = float(nl) / 4.\n \n tsky_haslam = self.tskylist[180*int(i) + int(j)]\n # scale temperature before returning\n return tsky_haslam * (self.freq/408.0)**(-2.6)", "def Tsky(source, freq=350*u.MHz, model='2008'):\n\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n\n m=SkyModel(freq=freq, tskymodel=model)\n return m.Tsky(source)", "def Tsky(self, source):\n\n if not _usePyGSM:\n raise ImportError('PyGSM is not available: cannot access sky temperatures')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n source=source.galactic\n T=healpy.pixelfunc.get_interp_val(self.map,\n source.l.value,\n source.b.value,\n lonlat=True)\n return T*u.K", "def gpower_integrand(self,theta,phi):\n\n wavelength = 299.9/self.frequency\n if(phi == 0): phi = .00001\n if(phi == np.pi): phi = np.pi - .00001\n\n self.aa.alt = np.pi/2.0 - theta\n self.aa.az = np.pi/2.0 - phi\n\n coords = self.aa.raDec(self.__lst_current,self.location)\n\n coords = self.Rotator(np.pi/2 - coords.dec,coords.ra)\n\n Tsky = hp.get_interp_val(self.hpmap,coords[0],coords[1])*(self.frequency/408.0)**(-2.55)\n\n ans = self.lofasm.beam_pattern(theta,phi,[0,0,1])\n ans += self.lofasm.beam_pattern(theta,phi,[0,1,0]) \n ans *= (Tsky*(1.3804e-23)/wavelength**2)/(1e-26)/2.0\n\n return ans", "def gtgram(\n wave, fs, window_time, hop_time, channels, f_min, f_max=None, return_freqs=False\n):\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gtgram_strides(fs, window_time, hop_time, xe.shape[1])\n\n y = np.zeros((channels, ncols))\n\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n\n if return_freqs:\n cfs = centre_freqs(fs, channels, f_min, f_max)\n return cfs, y\n return y", "def compute_ctf(freqs,rots,akv,cs,wgh,dfmid1f,dfmid2f,angastf,dscale,bfactor=None): \n av = akv * 1e3 # Convert kilovots to volts\n cs = cs * 1e7 # Convert spherical aberation from mm to A\n \n # wavelength of electrons\n elambda 
= 12.2643247 / n.sqrt(av + av**2 * 0.978466e-6)\n \n wgh1 = dscale*n.sqrt(1.0 - wgh**2)\n wgh2 = dscale*wgh\n\n ix = freqs[:,0]\n iy = freqs[:,1]\n freq_radius = n.sqrt(ix**2 + iy**2)\n\n angle = elambda*freq_radius\n angspt = n.arctan2(iy,ix)\n if rots is not None:\n angspt = n.mod(angspt.reshape((-1,1)) + rots.reshape((1,-1)),2.0*n.pi)\n angle = angle.reshape((-1,1)) \n c1 = 2.0*n.pi*angle**2/(2.0*elambda)\n c2 = -c1*cs*angle**2/2.0\n angdif = angspt - angastf\n ccos = n.cos(2.0*angdif)\n df = 0.5*(dfmid1f + dfmid2f + ccos*(dfmid1f-dfmid2f))\n chi = c1*df + c2\n\n ctf = -wgh1*n.sin(chi) - wgh2*n.cos(chi)\n \n if bfactor is not None:\n ctf *= envelope_function(freq_radius, bfactor)\n\n return n.require(ctf,dtype = freqs.dtype)", "def thermodynamic_temperature(frequency, T_cmb=None):\n nu = frequency.to(si.GHz, spectral())\n\n if T_cmb is None:\n from astropy.cosmology import default_cosmology\n\n T_cmb = default_cosmology.get().Tcmb0\n\n def f(nu, T_cmb=T_cmb):\n x = _si.h * nu / _si.k_B / T_cmb\n return x**2 * np.exp(x) / np.expm1(x) ** 2\n\n def convert_Jy_to_K(x_jybm):\n factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(\n astrophys.Jy\n )\n return x_jybm / factor\n\n def convert_K_to_Jy(x_K):\n factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(\n si.K\n )\n return x_K / factor\n\n return Equivalency(\n [(astrophys.Jy / si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],\n \"thermodynamic_temperature\",\n {\"frequency\": frequency, \"T_cmb\": T_cmb},\n )", "def fringe_frequency(self, wavelength=0.028, terrestrial_latitude=37.873199, h_s0=0):\n\t\tBew, Bns, baseline = bf.baseline_script_2D(self.hour_angles, 0, self.volts, self.times)\n\t\tfirst_term = Bew / wavelength * np.cos(self.dec) * cos(h_s0)\n\t\tsecond_term = Bns / wavelength * np.sin(terrestrial_latitude) * np.cos(self.dec) * np.sin(h_s0)\n\t\treturn first_term - second_term", "def compute_tsky_hot( xv, yv, hv, thot, tcold):\n\n nData = len(yv) \n epsilons = np.full( nData, EPSILON)\n tsys = np.zeros(nData) # initialize arrays\n\n Z = np.zeros(nData)\n oneMZ = np.zeros(nData)\n # For full Temp calibration, a spectrum taken at high elevation away from \n # The galactic plan is used. For this program the cold spectrum must be\n # the spectrum being calibrated. See the M command for comparision\n epsilons = np.full( nData, EPSILON)\n yv = np.maximum( yv, epsilons)\n hv = np.maximum( hv, epsilons)\n # comput the cold/hot ratio\n Z = yv/hv\n oneMZ = np.full( nData, 1.) 
- Z\n oneMZ = np.maximum( oneMZ, epsilons)\n\n # the cold, receiver, temperature is this function\n tsys = ((Z*thot) - tcold)/oneMZ\n \n n6 = int(nData/6)\n n56 = 5*n6\n\n tsysmedian = np.median( tsys[n6:n56])\n\n tsky = np.zeros(nData) # initialize arrays\n S = np.zeros(nData) # initialize arrays\n\n # The system gain S is computed assuming a tsys is the cold load\n S = np.full( nData, tsysmedian+thot)/hv\n # scale the observed instensity in counts to Kelvins.\n tsky = S*yv\n\n return tsky", "def system_temp(freq_hz):\n freqs = np.array([0.05e9, 0.07e9, 0.11e9, 0.17e9, 0.25e9, 0.35e9, 0.45e9,\n 0.55e9, 0.65e9])\n t_sys = np.array([4.0409e3, 1.5029e3, 0.6676e3, 0.2936e3, 0.1402e3, 0.0873e3,\n 0.0689e3, 0.0607e3, 0.0613e3])\n f = interp1d(np.log10(freqs), np.log10(t_sys), kind='cubic')\n return 10**f(np.log10(freq_hz))", "def Gamma_per_grain(ZZall, Gamma_a_Z, ZZ_fz, fdist, GG):\n\n # index in the ZZall array for the charges in ZZ_fz\n zi_down = np.where(ZZall == ZZ_fz[0])[0][0]# find the index of the ZZ_fz[0] in ZZall \n zi_up = np.where(ZZall == ZZ_fz[-1])[0][0]# find the index of the ZZ_fz[-1] in ZZall\n \n #Gamma_pe_a = np.sum(fz*Gamma_dotdot_scaled[zi_down:zi_up+1])\n Gamma_pe_a = np.sum(fdist*Gamma_a_Z[zi_down:zi_up+1])\n \n return Gamma_pe_a", "def at_frequencies(\n self,\n freqs,\n inplace=True,\n freq_interp_kind=\"cubic\",\n nan_handling=\"clip\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n atol=None,\n ):\n sky = self if inplace else self.copy()\n\n if atol is None:\n atol = self.freq_tol\n\n if self.spectral_type == \"spectral_index\":\n sky.stokes = (\n self.stokes\n * (freqs[:, None].to(\"Hz\") / self.reference_frequency[None, :].to(\"Hz\"))\n ** self.spectral_index[None, :]\n )\n sky.reference_frequency = None\n elif self.spectral_type == \"full\":\n # Find a subset of the current array.\n ar0 = self.freq_array.to_value(\"Hz\")\n ar1 = freqs.to_value(\"Hz\")\n tol = atol.to_value(\"Hz\")\n matches = np.fromiter(\n (np.isclose(freq, ar1, atol=tol).any() for freq in ar0), dtype=bool\n )\n\n if np.sum(matches) != freqs.size:\n raise ValueError(\n \"Some requested frequencies are not present in the current SkyModel.\"\n )\n sky.stokes = self.stokes[:, matches, :]\n if sky.freq_edge_array is not None:\n sky.freq_edge_array = sky.freq_edge_array[:, matches]\n elif self.spectral_type == \"subband\":\n if np.max(freqs.to(\"Hz\")) > np.max(self.freq_array.to(\"Hz\")):\n raise ValueError(\n \"A requested frequency is larger than the highest subband frequency.\"\n )\n if np.min(freqs.to(\"Hz\")) < np.min(self.freq_array.to(\"Hz\")):\n raise ValueError(\n \"A requested frequency is smaller than the lowest subband frequency.\"\n )\n # Interpolate. 
Need to be careful if there are NaNs -- they spoil the\n # interpolation even for sources that do not have any NaNs.\n stokes_unit = self.stokes.unit\n if np.any(np.isnan(self.stokes.value)):\n allowed_nan_handling = [\"propagate\", \"interp\", \"clip\"]\n if nan_handling not in allowed_nan_handling:\n raise ValueError(\n f\"nan_handling must be one of {allowed_nan_handling}\"\n )\n\n message = \"Some stokes values are NaNs.\"\n if nan_handling == \"propagate\":\n message += (\n \" All output stokes values for sources with any NaN values \"\n \"will be NaN.\"\n )\n else:\n message += \" Interpolating using the non-NaN values only.\"\n message += (\n \" You can change the way NaNs are handled using the \"\n \"`nan_handling` keyword.\"\n )\n warnings.warn(message)\n stokes_arr = self.stokes.value\n freq_arr = self.freq_array.to(\"Hz\").value\n at_freq_arr = freqs.to(\"Hz\").value\n # first interpolate any that have no NaNs\n wh_nan = np.nonzero(np.any(np.isnan(stokes_arr), axis=(0, 1)))[0]\n wh_non_nan = np.nonzero(np.all(~np.isnan(stokes_arr), axis=(0, 1)))[0]\n assert wh_non_nan.size + wh_nan.size == self.Ncomponents, (\n \"Something went wrong with spliting sources with NaNs. This is a \"\n \"bug, please make an issue in our issue log\"\n )\n new_stokes = np.zeros(\n (4, freqs.size, self.Ncomponents), dtype=stokes_arr.dtype\n )\n if wh_non_nan.size > 0:\n finterp = scipy.interpolate.interp1d(\n freq_arr,\n stokes_arr[:, :, wh_non_nan],\n axis=1,\n kind=freq_interp_kind,\n )\n new_stokes[:, :, wh_non_nan] = finterp(at_freq_arr)\n\n if nan_handling == \"propagate\":\n new_stokes[:, :, wh_nan] = np.NaN\n else:\n wh_all_nan = []\n wh_nan_high = []\n wh_nan_low = []\n wh_nan_many = []\n for comp in wh_nan:\n freq_inds_use = np.nonzero(\n np.all(~np.isnan(stokes_arr[:, :, comp]), axis=0)\n )[0]\n if freq_inds_use.size == 0:\n new_stokes[:, :, comp] = np.NaN\n wh_all_nan.append(comp)\n continue\n at_freq_inds_use = np.arange(freqs.size)\n\n if np.max(at_freq_arr) > np.max(freq_arr[freq_inds_use]):\n at_freq_inds_use = np.nonzero(\n at_freq_arr <= np.max(freq_arr[freq_inds_use])\n )[0]\n at_freqs_large = np.nonzero(\n at_freq_arr > np.max(freq_arr[freq_inds_use])\n )[0]\n wh_nan_high.append(comp)\n if nan_handling == \"interp\":\n new_stokes[:, at_freqs_large, comp] = np.NaN\n else: # clip\n large_inds_use = np.full(\n (at_freqs_large.size), freq_inds_use[-1]\n )\n new_stokes[:, at_freqs_large, comp] = stokes_arr[\n :, large_inds_use, comp\n ]\n\n if np.min(at_freq_arr) < np.min(freq_arr[freq_inds_use]):\n at_freq_inds_use_low = np.nonzero(\n at_freq_arr >= np.min(freq_arr[freq_inds_use])\n )[0]\n at_freq_inds_use = np.intersect1d(\n at_freq_inds_use, at_freq_inds_use_low\n )\n at_freqs_small = np.nonzero(\n at_freq_arr < np.min(freq_arr[freq_inds_use])\n )[0]\n wh_nan_low.append(comp)\n if nan_handling == \"interp\":\n new_stokes[:, at_freqs_small, comp] = np.NaN\n else: # clip\n small_inds_use = np.full(\n (at_freqs_small.size), freq_inds_use[0]\n )\n new_stokes[:, at_freqs_small, comp] = stokes_arr[\n :, small_inds_use, comp\n ]\n\n if at_freq_inds_use.size > 0:\n try:\n finterp = scipy.interpolate.interp1d(\n freq_arr[freq_inds_use],\n stokes_arr[:, freq_inds_use, comp],\n axis=1,\n kind=freq_interp_kind,\n )\n except ValueError:\n wh_nan_many.append(comp)\n finterp = scipy.interpolate.interp1d(\n freq_arr[freq_inds_use],\n stokes_arr[:, freq_inds_use, comp],\n axis=1,\n kind=\"linear\",\n )\n new_stokes[:, at_freq_inds_use, comp] = finterp(\n at_freq_arr[at_freq_inds_use]\n )\n else:\n 
continue\n if len(wh_all_nan) > 0:\n warnings.warn(\n f\"{len(wh_all_nan)} components had all NaN stokes values. \"\n \"Output stokes for these components will all be NaN.\"\n )\n if len(wh_nan_high) > 0:\n message = (\n f\"{len(wh_nan_high)} components had all NaN stokes values \"\n \"above one or more of the requested frequencies. \"\n )\n if nan_handling == \"interp\":\n message += (\n \"The stokes for these components at these frequencies \"\n \"will be NaN.\"\n )\n else:\n message += (\n \"Using the stokes value at the highest frequency \"\n \"without a NaN for these components at these \"\n \"frequencies.\"\n )\n warnings.warn(message)\n if len(wh_nan_low) > 0:\n message = (\n f\"{len(wh_nan_low)} components had all NaN stokes values below \"\n \"one or more of the requested frequencies. \"\n )\n if nan_handling == \"interp\":\n message += (\n \"The stokes for these components at these frequencies \"\n \"will be NaN.\"\n )\n else:\n message += (\n \"Using the stokes value at the lowest frequency \"\n \"without a NaN for these components at these frequencies.\"\n )\n warnings.warn(message)\n if len(wh_nan_many) > 0:\n warnings.warn(\n f\"{len(wh_nan_many)} components had too few non-NaN stokes \"\n \"values for chosen interpolation. Using linear \"\n \"interpolation for these components instead.\"\n )\n sky.stokes = new_stokes * stokes_unit\n else:\n finterp = scipy.interpolate.interp1d(\n self.freq_array.to(\"Hz\").value,\n self.stokes.value,\n axis=1,\n kind=freq_interp_kind,\n )\n sky.stokes = finterp(freqs.to(\"Hz\").value) * stokes_unit\n else:\n # flat spectrum\n stokes_unit = self.stokes.unit\n sky.stokes = np.repeat(self.stokes.value, len(freqs), axis=1) * stokes_unit\n\n sky.reference_frequency = None\n sky.Nfreqs = freqs.size\n sky.freq_array = freqs\n if sky.spectral_type == \"subband\" and sky.freq_edge_array is not None:\n sky.freq_edge_array = None\n sky.spectral_type = \"full\"\n if sky.frame_coherency is not None:\n sky.coherency_radec = sky.calc_frame_coherency()\n\n if run_check:\n sky.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n if not inplace:\n return sky", "def get_frequency(self,):\n\n # TODO: Find way to appropriately reconvert the frequency to its initial\n # TODO: Value or alert that the value is APPROXIMATE\n FTW = int (0)\n freq = int(0)\n\n FTW_bytes = self._read('CFTW0')\n FTW = FTW.from_bytes(FTW_bytes,'big')\n freq = FTW*self.clock_freq/2**32\n\n print('Latest frequency set: ', \"{:.2e}\".format(freq), 'Hz')\n print(['%.2e' % elem for elem in self.frequencies])\n\n return self.frequencies", "def get_skylight(self, coords):\n\n x, y, z = coords\n index, y = divmod(y, 16)\n\n return self.sections[index].get_skylight((x, y, z))", "def get_spectrum_freq(self):\n if not self.is_a_spectrum_file():\n raise TelemacException(\\\n \"This file does not seem to be a spectrum file\")\n\n nfreq = 0\n eps = 1e-6\n f_1 = 10e10\n f_2 = 10e10\n raisf = 0.\n for x, y in zip(self.meshx, self.meshy):\n if abs(x) <= eps and y >= 0.:\n nfreq += 1\n f_temp = y\n if f_temp < f_1:\n f_2 = f_1\n f_1 = f_temp\n elif f_temp < f_2:\n f_2 = f_temp\n\n raisf = f_2/f_1\n\n freqs = [f_1 * raisf**i for i in range(nfreq)]\n\n dfreqs = np.zeros(nfreq, dtype=np.float64)\n\n auxi = (raisf - 1.)/2.\n dfreqs[0] = auxi*freqs[0]\n for i in range(1, nfreq-1):\n dfreqs[i] = auxi*(freqs[i] + freqs[i-1])\n\n dfreqs[-1] = auxi*freqs[-2]\n\n return np.array(freqs), dfreqs", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = 
len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def tidefit(self,frqnames=None,basetime=None):\r\n \r\n # Get the tidal fruequencies\r\n if frqnames == None:\r\n\t\t\t# This returns the default frequencies from the uspectra class\r\n frq,frqnames = getTideFreq(Fin=None)\r\n else:\r\n frq,frqnames = getTideFreq(Fin=frqnames)\r\n \r\n # Call the uspectra method\r\n U = uspectra(self.tsec,self.y,frq=frq,method='lsqfast')\r\n \r\n amp,phs = U.phsamp(phsbase=basetime)\r\n \r\n return amp, phs, frq, frqnames, U.invfft()", "def sfreq_to_times(gaze_array, sfreq, start_time=0):\n return np.arange(0, len(gaze_array) / sfreq, 1. / sfreq) + start_time", "def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux", "def stft(db,istart=0,istop=86400,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**9):\r\n \r\n #get length of input time series if there is two columns\r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm<fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=fx.shape[0]\r\n fm=1\r\n if fm>1:\r\n fx=fx.reshape(fn)\r\n else:\r\n fx=fx.reshape(fn)\r\n #make a hanning window to minimize aliazing and Gibbs effect of short time \r\n #windows\r\n h=normalizeL2(np.hanning(nh))\r\n #make a hanning window to smooth in frequency domain\r\n if ng!=1:\r\n if np.remainder(ng,2)!=1:\r\n ng=ng-1\r\n print 'ng forced to be odd as ng-1'\r\n else:\r\n pass\r\n g=normalizeL2(np.hanning(ng))\r\n else:\r\n pass\r\n #make time step list\r\n tlst=np.arange(start=0,stop=fn-nh+1,step=tstep)\r\n #make a frequency list for plotting exporting only positive frequencies\r\n df=float(df)\r\n flst=np.fft.fftfreq(nfbins,1/df)[0:nfbins/2] #get only positive frequencies\r\n #initialize the TFD array\r\n tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')\r\n \r\n fa=sps.hilbert(dctrend(fx))\r\n \r\n for place,ii in enumerate(tlst):\r\n fxwin=fa[ii:ii+nh]*h\r\n #get only positive frequencies\r\n FXwin=np.fft.fft(padzeros(fxwin,npad=nfbins))[:nfbins/2]\r\n #smooth in frequency plane\r\n if ng!=1:\r\n FXwin=np.convolve(padzeros(FXwin,npad=len(FXwin)+ng-1),g,'valid')\r\n else:\r\n pass\r\n #pull out only positive quadrant, flip array for plotting\r\n tfarray[:,place]=FXwin[::-1]\r\n \r\n return tfarray,tlst,flst", "def get_fluxes_within_mask(tpf, aper_mask, gaia_sources):\n 
assert tpf is not None\n assert aper_mask is not None\n assert gaia_sources is not None\n ra, dec = gaia_sources[[\"ra\", \"dec\"]].values.T\n pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)\n contour_points = measure.find_contours(aper_mask, level=0.1)[0]\n isinside = [\n is_point_inside_mask(contour_points, pix) for pix in pix_coords\n ]\n min_gmag = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].min()\n gamma = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].apply(\n lambda x: 10 ** (0.4 * (min_gmag - x))\n )\n return gamma", "def compute_single_ph_gfunc(\n obj: floquet_analysis.FloquetAnalyzer,\n freqs: np.array) -> np.ndarray:\n # Compute the decay operator and the convolved excitation operator within\n # the single-excitation subspace.\n decay_op = obj.decay_op(1)\n ex_op_conv = obj.ex_op_conv(1, freqs)\n gfunc_time = np.matmul(\n decay_op[np.newaxis, :, :, :], ex_op_conv)[:, :, 0, 0]\n return gfunc_time, np.fft.fftshift(np.fft.ifft(gfunc_time, axis=1), axes=1)", "def brightness_temperature(frequency, beam_area=None):\n if frequency.unit.is_equivalent(si.sr):\n if not beam_area.unit.is_equivalent(si.Hz):\n raise ValueError(\n \"The inputs to `brightness_temperature` are frequency and angular area.\"\n )\n warnings.warn(\n \"The inputs to `brightness_temperature` have changed. \"\n \"Frequency is now the first input, and angular area \"\n \"is the second, optional input.\",\n AstropyDeprecationWarning,\n )\n frequency, beam_area = beam_area, frequency\n\n nu = frequency.to(si.GHz, spectral())\n factor_Jy = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value\n factor_K = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value\n\n if beam_area is not None:\n beam = beam_area.to_value(si.sr)\n\n def convert_Jy_to_K(x_jybm):\n return x_jybm / beam / factor_Jy\n\n def convert_K_to_Jy(x_K):\n return x_K * beam / factor_K\n\n return Equivalency(\n [\n (astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy),\n (astrophys.Jy / astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy),\n ],\n \"brightness_temperature\",\n {\"frequency\": frequency, \"beam_area\": beam_area},\n )\n else:\n\n def convert_JySr_to_K(x_jysr):\n return x_jysr / factor_Jy\n\n def convert_K_to_JySr(x_K):\n return x_K / factor_K # multiplied by 1x for 1 steradian\n\n return Equivalency(\n [(astrophys.Jy / si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)],\n \"brightness_temperature\",\n {\"frequency\": frequency, \"beam_area\": beam_area},\n )", "def gtgram(wave,fs,window_time, hop_time,channels,f_min,f_max):\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gt.gtgram_strides(fs,window_time, hop_time, xe.shape[1])\n y = np.zeros((channels, ncols))\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n return y", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def frequencies(self):\r\n\r\n self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]", "def tsz_spectrum(self, nu):\n x = NU_SCALE * nu # Frequency/temperature\n #g_nu = ( x*(np.exp(x) + 1.) / (np.exp(x) - 1.) ) - 4. # tSZ spectral dependence\n g_nu = x**2. * np.exp(x) * (x/np.tanh(x/2.) - 4.) 
/ (np.exp(x) - 1.)**2.\n return g_nu", "def k_2_jy(freq: float, theta_major: float,\n theta_minor: float, brightness: float) -> float:\n conv = (1.222E3 * (freq ** -2) / theta_minor / theta_major) ** -1\n return brightness * conv", "def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)", "def shf(self, tl, ta, lai):\n\t\treturn CP_A*RHO_A*self.GA*(tl-ta)/1000.*lai", "def get_brightnesstemperature(self, channel):\n K1 = {\n \"10\": 3040.136402, # Constant K1 [W m-2 um-1].\n \"11\": 2482.375199,\n \"12\": 1935.060183,\n \"13\": 866.468575,\n \"14\": 641.326517,\n }\n\n K2 = {\n \"10\": 1735.337945, # Constant K2 [K].\n \"11\": 1666.398761,\n \"12\": 1585.420044,\n \"13\": 1350.069147,\n \"14\": 1271.221673,\n }\n\n return K2[channel] / np.log((K1[channel] / self.get_radiance(channel)) + 1)", "def frequencies(self):\r\n\r\n # Get the sampling rate from the seed time-series:\r\n self.method['Fs'] = self.method.get('Fs', self.seed.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + 
str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def sky(ants=0, tmo=15, waiton=ALL, subarray=DEFAULT, setAttens=True) :\n antlist = helpers.makeList(ants)\n return cal(carma.antenna.common.CalibratorControl.SKY, antlist, tmo, \n waiton, subarray=subarray, setAttens=setAttens)", "def A_TT_fg(self, L, fCfg):\n if L>2.*self.CMB.lMaxT:\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>self.CMB.lMaxT:\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.f_TT_fg(l1, l2, phi, fCfg) * self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.A_TT.__func__, \"integ\"):\n self.A_TT.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(self.CMB.lMaxT)], [0., np.pi]])\n self.A_TT.integ(integrand, nitn=8, neval=1000)\n\n result = self.A_TT.integ(integrand, nitn=1, neval=5000)\n# result = self.A_TT.integ(integrand, nitn=8, neval=5000)\n# result = self.A_TT.integ(integrand, nitn=4, neval=1000)\n\n result = result.mean\n\n # multiply by N^{0 phi}, to get dimensionless multiplicative bias\n result *= self.fN_phi_TT(L)\n \n if not np.isfinite(result):\n result = 0.\n return result", "def infilRateGA(Ks, presHead, thetaSat, thetaInit, F, tp):\n numerator = Ks*np.absolute(presHead)*(thetaSat - thetaInit)\n fraction = numerator/F\n f = Ks + fraction\n\n return f", "def get_gamma_coeffs(\n self, freq: tp.FreqType | None = None, ant_s11: np.ndarray | None = None\n ):\n if freq is None:\n freq = self.calobs.freq.freq\n\n lna = self.calobs.receiver_s11(freq)\n\n if ant_s11 is None:\n ant_s11 = self.antenna_s11_model(freq)\n return rcf.get_K(lna, ant_s11)", "def one_transition_spectrum_ld(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = ld*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = 
numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def get_gaba_spktimes(self):\n\n spktimes = []\n for sweep_no in range(self.get_no_sweeps()):\n spktimes_singlesweep = []\n for cell_no in range(self.get_no_gaba_neurons()):\n spktimes_singlesweep.append(\n np.where(self.gaba_spktrains[sweep_no, cell_no, :] > 0.5)[\n 0\n ]\n * self.get_dt()\n )\n spktimes.append(spktimes_singlesweep)\n return spktimes", "def _convert_frequency(self, frequency):\n\n # Assert frequency lies within allowed range\n FTW_step = self.clock_freq/2**32\n FTW = round(frequency/FTW_step)\n Min_freq = 0\n Max_freq = (2**32-1)*FTW_step\n\n assert FTW >= 0, 'Minimum frequency is %r' %Min_freq\n assert FTW <= 2**32, 'Maximum frequency is %r' %Max_freq\n\n # Compute frequency tuning word for given frequency and clock frequency\n # Write FTW in list of 4 bytes (CFTW0 is a 32-bit register)\n return [FTW.to_bytes(4, 'big')[i] for i in range(4)]", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def getsky(image,silent=False,circlerad=False,meanback=False,highbad=None,\n readnoise=None,integer=False,histback=False,nan=False): \n\n if image.ndim not in [1,2]:\n raise ValueError('ERROR - Input array (first parameter) must be 1 or 2 dimensional')\n \n checkbad = ( (highbad is not None) or circlerad or nan)\n sh = image.shape\n if image.ndim==1:\n ncol = 1\n nrow = image.size\n else:\n ncol,nrow = image.shape\n\n if circlerad:\n if ncol != nrow: \n raise ValueError('ERROR - The CIRCLERAD keyword only applies to a 2-d square array')\n \n if checkbad: \n mask = np.ones(image.shape,bool)\n if highbad is not None:\n mask = mask & (image < highbad) \n if nan: \n mask = mask & np.isfinite(image) \n if circlerad: \n if circlerad == 1: \n rad = nrow/2 \n else: \n rad = int(circlerad) \n # Make image where each value is its distance to a given center\n xv,yv = np.meshgrid(np.arange(nrow),np.arange(nrow))\n cen = (nrow-1)/2.\n drad = np.sqrt((xv-cen)**2+(yv-cen)**2) \n #dist_circle,drad, nrow \n mask = mask and (drad < rad) \n npts = np.sum(mask)\n else:\n npts = image.size\n \n # Use ~10000 data points or at least 2 points per row \n maxsky = np.maximum(2*npts//(nrow-1), 10000) # Maximum # of pixels to be used in sky calculation \n # Maintain the same data type as the input image Nov 2005 \n istep = npts//maxsky +1\n skyvec = np.zeros(maxsky+500,dtype=image.dtype)\n #skyvec = make_array(maxsky+200,type=size(image,/type)) \n nstep = (nrow//istep) \n \n jj = 0 \n index0 = istep*np.arange(nstep) \n if nstep > 1: \n i0 = np.maximum((nrow-1 - max(index0) - istep)//2, 0) # Adjust margin for symmetry \n index0 = index0 + i0 \n \n # The beginning index in each row is staggered to avoid emphasizing possible \n # bad columns \n \n for i in range(ncol): \n index = index0 + (i % istep) \n row = image[i,:] \n if checkbad: \n g, = np.where(mask[i,:]) \n ng = len(g)\n if ng==0:\n break\n row = row[g] \n else:\n ng = nrow \n imax = np.maximum(np.searchsorted(index, ng-1), 0)\n #imax = value_locate( index, ng-1) > 0 \n ix = np.minimum( index[0:imax], ng-1)\n skyvec[jj:jj+len(ix)] = row[ix] \n jj += imax\n if jj > maxsky: \n break \n\n skyvec = skyvec[0:jj] \n\n if meanback: \n skymode, skysig, subs = utils.meanclip(skyvec)\n nsky = len(subs) \n else:\n skymode,skysig,skynew,nsky = utils.mmm(skyvec,readnoise=readnoise,integer=integer,highbad=highbad)\n \n # Use histogram around median to get mode \n if histback:\n gd = (np.abs(image-skymode) < 4*skysig) \n 
xhist = np.arange(np.min(image[gd]),np.max(image[gd]),skysig/40)\n hist,bin_edges = np.histogram(image[gd],bins=xhist)\n xhist2 = np.linspace(np.min(xhist),np.max(xhist),1000)\n hist2 = np.interp(xhist2,xhist[:-1],hist)\n bestind = np.argmax(hist2)\n skymode1 = np.copy(skymode) # save original one \n skymode = xhist2[bestind] \n \n \n skymode = float(skymode)\n skysig = float(skysig) \n if silent==False:\n print('Number of points used to find sky = ',nsky)\n print('Approximate sky value for this frame = ',skymode)\n print('Standard deviation of sky brightness = ',skysig)\n \n return skymode,skysig", "def toHEC(rts):\n pathname = rts.name\n values = rts.getYArray()\n import jarray\n times = jarray.zeros(len(values),'i')", "def temperature() -> FlowFieldVal:\n return [\n self._t_s - self._delta_t * tf.math.tanh(z / self._height) for z in zz\n ]", "def _filter_frequencies(self):\n import scipy.signal as spsg\n freq_bands = ['alpha', 'beta', 'gamma']\n if len(freq_bands) != self.n_bands:\n raise ValueError('Rename frequency bands')\n freqs_ts = np.empty([0, self.total_trials, self.ms, self.n_raw_features])\n for i_band in range(self.n_bands):\n freq_band = freq_bands[i_band]\n\n if freq_band == 'alpha':\n low_f = 8./self.sampling_freq\n high_f = 15./self.sampling_freq\n elif freq_band == 'beta':\n # beta\n low_f = 15./self.sampling_freq\n high_f = 32./self.sampling_freq\n elif freq_band == 'gamma':\n # gamma\n low_f = 32./self.sampling_freq\n high_f = 80./self.sampling_freq\n else:\n raise NameError('unknown filter')\n\n b, a = spsg.iirfilter(self.band_filter_order, [low_f, high_f],\n btype='bandpass', ftype='butter', output='ba')\n # ts_data: (trials, t, n)\n filtered_ts = spsg.filtfilt(b, a, self.ts_data, axis=-2)\n freqs_ts = np.concatenate((freqs_ts, np.array([filtered_ts])))\n\n return freqs_ts", "def evaluateFreq (self, time, station, freq):\n return self._response.evaluate3(time, station, freq)", "def freq_of_t(self, times):\n y = np.zeros(len(times))\n amplitudes, phases = self._ampl_phase(self.fourier_coeffs)\n for n, (ampl, phase) in enumerate(zip(amplitudes, phases)):\n omega_n = 2*np.pi*self.pzt_freq*(n+1)\n y+=ampl*np.cos(omega_n*times + phase)\n return y", "def constant_sfh(time_bins, sfr):\n return np.array([sfr]*len(time_bins))", "def getFWHM(antenna, freq):\n diameter = getDiameter(antenna)\n lam = 299792458.0 / (freq * 1e9)\n fwhmo = lam / math.pi * 180.0 * 60.0\n fwhm = 1.22 * fwhmo / diameter\n return fwhm", "def global_sky_background(self, LF):\n # Variables:\n s = 9 # Number of subframes (CHANGE IF NEEDED!) E.g. 4, 9, 16 etc. 
\n n = self.h*self.w/(self.h+self.w) # Number of pixels used in subframes scales with image dim \n nrows = self.h/(s/2) # Numbers of rows in each subframe\n ncols = self.w/(s/2) # Numbers of columns in each subframe\n\n # Reshape light frame into subframe:\n LF_sub = (LF.reshape(self.h//nrows, nrows, -1, ncols).swapaxes(1,2).reshape(-1, nrows, ncols))\n\n # Loop over all subframes:\n min_val = np.zeros((s,n))\n for i in range(s):\n # Loops over all pixels:\n for j in range(n):\n min_val[i,j] = np.min(LF_sub[i]) # Minimum value for array\n min_dex = np.where(LF_sub[i] == min_val[i,j]) # Find row, column for min value\n # Min pixel is set to max in order to find the next min:\n LF_sub[i, min_dex[0][0], min_dex[1][0]] = np.max(LF_sub[i]) \n\n # Flux:\n flux_sky = 3*median(min_val) - 2*mean(min_val) # Mean flux from pixels\n return flux_sky", "def get_freq_grid():\n (bins_per_octave, n_octaves, _, _, f_min, _) = get_hcqt_params()\n freq_grid = librosa.cqt_frequencies(\n bins_per_octave*n_octaves, f_min, bins_per_octave=bins_per_octave\n )\n return freq_grid", "def get_sound_speed_temperature(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):\n # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray\n y = chemical_potential / tt\n C_T = (\n 2 ** (1 / 4)\n * np.sqrt(gbar)\n / np.pi\n * np.sqrt(vv)\n * tt ** (5 / 4)\n * _1d_call(_fdk, y, k=1 / 2)\n / np.sqrt(_1d_call(_fdk, y, k=-1 / 2))\n )\n return C_T", "def flux_sensitivity(freq_hz, t_acc=5, bw_hz=100e3, num_antennas=256, eta=1,\n t_sys=None, a_eff=None):\n t_sys = system_temp(freq_hz) if t_sys is None else t_sys\n a_eff = (element_effective_area(freq_hz) * num_antennas) if a_eff is None \\\n else a_eff\n sefd = (2 * const.k_B.value * t_sys * eta) / a_eff\n sigma_s = (sefd * 1e26) / sqrt(2 * bw_hz * t_acc)\n return sigma_s", "def calculate_color_temperature(r: int, g: int, b: int) -> float:\n\n # 1. Map RGB values to their XYZ counterparts.\n # Based on 6500K fluorescent, 3000K fluorescent\n # and 60W incandescent values for a wide range.\n # Note: Y = Illuminance or lux\n x = (-0.14282 * r) + (1.54924 * g) + (-0.95641 * b)\n y = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)\n z = (-0.68202 * r) + (0.77073 * g) + (0.56332 * b)\n\n # 2. Calculate the chromaticity co-ordinates\n xchrome = x / (x + y + z)\n ychrome = y / (x + y + z)\n\n # 3. Use to determine the CCT\n n = (xchrome - 0.3320) / (0.1858 - ychrome)\n\n # 4. 
Calculate the final CCT\n cct = (449.0 * pow(n, 3)) + (3525.0 * pow(n, 2)) + (6823.3 * n) + 5520.33\n\n # Return the results in degrees Kelvin\n return cct", "def sky_noise_weighting(file_name, sky_file_name):\n cs_data = spectra_analysis(file_name, sky_file_name)\n cube_data = cs_data['gd_shifted']\n sn_data = cs_data['sky_noise']\n wl_soln = wavelength_solution(file_name)\n\n sn_data_min = np.min(sn_data)\n in_wt = 1 / (sn_data - sn_data_min + 1)\n\n sky_regns = np.zeros((len(in_wt),2)) # storing regions of potential sky noise\n for i in range(len(in_wt)): \n data_acl = cube_data[i]\n data_sky = sn_data[i]\n data_prb = in_wt[i]\n \n if ( 0.00 <= np.abs(data_prb) <= 1.00 ):\n sky_regns[i][0] = data_prb\n sky_regns[i][1] = data_sky\n\n # finding max peak in the sky-noise data and fitting a Gaussian to that\n # x-axis data\n x_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n\n # Finding peaks with PeakUtils\n sky_peaks = peakutils.indexes(sn_data, thres=300, thres_abs=True)\n sky_peaks_x = peakutils.interpolate(x_range, sn_data, sky_peaks)\n\n if (sky_peaks_x.size != 0):\n sky_peak = sky_peaks_x[0]\n sky_peak_index = find_nearest(sky_peak, x_range)\n else:\n sky_peak = 6000\n sky_peak_index = 0\n\n sky_peak_loc = x_range[sky_peak_index]\n\n sky_peak_range = [sky_peak-100, sky_peak+100]\n sky_peak_range_loc = [find_nearest(x_range, x) for x in sky_peak_range]\n\n sky_rng_x = x_range[sky_peak_range_loc[0]:sky_peak_range_loc[1]]\n sky_rng_y = sn_data[sky_peak_range_loc[0]:sky_peak_range_loc[1]]\n\n sky_gauss_params = Parameters()\n sky_gauss_params.add('c', value=0)\n sky_gauss_params.add('i1', value=np.max(sky_rng_y), min=0.0)\n sky_gauss_params.add('mu', value=sky_peak_loc)\n sky_gauss_params.add('sigma1', value=3)\n\n sky_gauss_model = Model(sn_gauss)\n sky_gauss_rslt = sky_gauss_model.fit(sky_rng_y, x=sky_rng_x, \n params=sky_gauss_params)\n sky_gauss_best = sky_gauss_rslt.best_values\n\n sky_sigma = sky_gauss_best['sigma1']\n\n return {'inverse_sky': in_wt, 'sky_regions': sky_regns, 'sky_sigma': sky_sigma}", "def chroma_stft(frames, sample_rate, *, kwargs={}):\n l = []\n for frame in frames:\n l.append(\n np.mean(\n librosa.feature.chroma_stft(\n y=frame,\n sr=sample_rate,\n **kwargs\n ).T, axis=0\n )\n )\n return np.array(l)", "def hz2mel(freq):\n return 2595. 
* np.log10(1+freq/700.0)", "def jy_2_k(freq: float, theta_major: float,\n theta_minor: float, intensity: float) -> float:\n conv = 1.222E3 * (freq ** -2) / theta_minor / theta_major\n return intensity * conv", "def sort_spectrum(freq, flux, brightness_temp):\n # Speed of light in microns/sec\n clight = 2.9979e14\n wave = clight / (freq * 1e9)\n\n # Sort the data by wavelength\n sortind = np.argsort(wave)\n w = wave[sortind]\n f = flux[sortind]\n t = brightness_temp[sortind]\n\n return w, f, t", "def robuststftL(fx,alpha=.325, nh=2**8,tstep=2**5,df=1.0,nfbins=2**10):\r\n \r\n #get length of input time series \r\n nfx=len(fx)\r\n \r\n #compute time shift list\r\n mlst=np.arange(start=-nh/2+1,stop=nh/2+1,step=1,dtype='int')\r\n #compute time locations to take STFT\r\n tlst=np.arange(start=0,stop=nfx-nh+1,step=tstep)\r\n \r\n #make a frequency list for plotting exporting only positive frequencies\r\n flst=np.fft.fftfreq(nfbins,1/df)\r\n flstc=flst[nfbins/2:]\r\n #Note: these are actually the negative frequencies but works better for\r\n #calculations\r\n flstp=flst[0:nfbins/2]\r\n \r\n #make time window and normalize\r\n sigmanh=nh/(6*np.sqrt(2*np.log(2)))\r\n h=sps.gaussian(nh,sigmanh)\r\n h=h/sum(h)\r\n \r\n #create an empty array to put the tf in and initialize a complex value\r\n tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')\r\n \r\n #take the hilbert transform of the signal to make complex and remove\r\n #negative frequencies\r\n fa=sps.hilbert(dctrend(fx))\r\n fa=fa/fa.std()\r\n \r\n #make a frequency list for plotting exporting only positive frequencies\r\n flst=np.fft.fftfreq(nfbins,1/df)[nfbins/2:]#get only positive frequencies\r\n \r\n #create list of coefficients\r\n a=np.zeros(nh)\r\n a[(nh-2)*alpha:alpha*(2-nh)+nh-1]=1./(nh*(1-2*alpha)+4*alpha)\r\n \r\n for tpoint,nn in enumerate(tlst):\r\n #calculate windowed correlation function of analytic function\r\n fxwin=h*fa[nn:nn+nh]\r\n for fpoint,mm in enumerate(flstc):\r\n fxelement=fxwin*np.exp(1j*2*np.pi*mlst*mm/df)\r\n fxreal=np.sort(fxelement.real)[::-1]\r\n fximag=np.sort(fxelement.imag)[::-1]\r\n tfpoint=sum(a*(fxreal+1j*fximag))\r\n if tfpoint==0.0:\r\n tfarray[fpoint,tpoint]=1E-10\r\n else:\r\n tfarray[fpoint,tpoint]=tfpoint\r\n #normalize tfarray\r\n tfarray=(4.*nh*df)*tfarray\r\n \r\n return tfarray,tlst,flstp", "def get_kf_kms(self):\n kfkms = np.array([ self.kf / (ss.velfac * 3.085678e24/ss.units.UnitLength_in_cm) for ss in self.spectrae])\n return kfkms", "def frequencies(self):\n if self.getn(\"frequency/type\") == \"custom\":\n # The value is validated to be a float list\n frequencies = self.getn(\"frequency/frequencies\")\n else:\n # Calculate the frequency values\n start = self.getn(\"frequency/start\")\n stop = self.getn(\"frequency/stop\")\n step = self.getn(\"frequency/step\")\n num = int((stop - start) / step + 1)\n frequencies = [start + step*i for i in range(num)]\n return frequencies", "def big_sky(nside=32, weights={'u': [0.31, 0.15, False], 'g': [0.44, 0.15],\n 'r': [1., 0.3], 'i': [1., 0.3], 'z': [0.9, 0.3],\n 'y': [0.9, 0.3, False]}):\n\n # wfd covers -72.25 < dec < 12.4. 
Avoid galactic plane |b| > 15 deg\n wfd_north = np.radians(12.4)\n wfd_south = np.radians(-72.25)\n full_north = np.radians(30.)\n g_lat_limit = np.radians(8.)\n\n ra, dec = _hpid2RaDec(nside, np.arange(hp.nside2npix(nside)))\n total_map = np.zeros(ra.size)\n coord = SkyCoord(ra=ra*u.rad, dec=dec*u.rad)\n g_long, g_lat = coord.galactic.l.radian, coord.galactic.b.radian\n\n # let's make a first pass here\n\n total_map[np.where(dec < full_north)] = 1e-6\n total_map[np.where((dec > wfd_south) &\n (dec < wfd_north) &\n (np.abs(g_lat) > g_lat_limit))] = 1.\n\n # Now let's break it down by filter\n result = {}\n\n for key in weights:\n result[key] = total_map + 0.\n result[key][np.where(result[key] == 1)] = weights[key][0]\n result[key][np.where(result[key] == 1e-6)] = weights[key][1]\n if len(weights[key]) == 3:\n result[key][np.where(dec > wfd_north)] = 0.\n\n return result", "def get_pixel_skydirs(self):\n sky_coords = self._hpx.get_sky_coords()\n if self.hpx.coordsys == 'GAL':\n return SkyCoord(l=sky_coords.T[0], b=sky_coords.T[1], unit='deg', frame='galactic')\n else:\n return SkyCoord(ra=sky_coords.T[0], dec=sky_coords.T[1], unit='deg', frame='icrs')", "def test_scalar_skycoord():\n\n data = make_4gaussians_image()\n wcs = make_wcs(data.shape)\n skycoord = wcs.pixel_to_world(90, 60)\n aper = SkyCircularAperture(skycoord, r=0.1 * u.arcsec)\n tbl = aperture_photometry(data, aper, wcs=wcs)\n assert isinstance(tbl['sky_center'], SkyCoord)", "def zenith_gain(freq):\n parfile = open(project_path\n + \"DSS-28_technical/efficiency_vs_freq_pars.pkl\",\"r\")\n pars = cPickle.load(parfile)\n parfile.close()\n effic = {}\n avg_effic = 0\n for key in list(pars.keys()):\n effic[key] = pars[key](freq)/100.\n avg_effic += effic[key]\n # right now I don't know what Pol A and Pol B are\n avg_effic /= len(list(pars.keys()))\n return avg_effic", "def getGasTemperature(grid=None, ppar=None):\n mesh = np.meshgrid(grid.x, grid.y, grid.z, indexing='ij')\n if ppar['crd_sys'] == 'sph':\n rr = mesh[0]\n tt = mesh[1]\n pp = mesh[2]\n xx = rr * np.sin(tt) * np.cos(pp)\n yy = rr * np.sin(tt) * np.sin(pp)\n zz = rr * np.cos(tt)\n cyrr = np.sqrt(xx**2. + yy**2.)\n elif ppar['crd_sys'] == 'car':\n xx = mesh[0]\n yy = mesh[1]\n zz = mesh[2]\n rr = np.sqrt(xx**2 + yy**2 + zz**2)\n cyrr = np.sqrt(xx**2. 
+ yy**2.)\n else:\n raise ValueError('crd_sys not specified in ppar')\n\n ztrans = ppar['zqratio'] * ppar['Ht'] * (cyrr / ppar['Rt'])**(ppar['qheight'])\n\n tatm = ppar['T0atm'] * (rr / ppar['Rt'])**ppar['qatm']\n tmid = ppar['T0mid'] * (cyrr / ppar['Rt'])**ppar['qmid']\n\n if ppar['zqratio'] > 0:\n tgas = tatm\n reg = abs(zz) < ztrans\n\n tgas[reg] = tatm[reg] + (tmid[reg] - tatm[reg]) * ((np.cos(np.pi*0.5 * abs(zz[reg])/ztrans[reg]))**(2*ppar['hdel']))\n elif ppar['zqratio'] == 0:\n tgas = tatm\n elif ppar['zqratio'] == -1:\n tgas = tmid\n else:\n raise ValueError('zqratio value not accepted')\n\n reg = tgas < ppar['cuttemp']\n tgas[reg] = ppar['cuttemp']\n\n return tgas", "def test_filt_stmag(self):\n sun = Sun.from_builtin('E490_2014')\n V = get_bandpass('johnson v')\n wave, fluxd = sun.filt(V, unit=u.STmag)\n assert np.isclose(fluxd.value, -26.76, atol=0.003)", "def DFTpower2(time, signal, freqs):\n \n powerSpectrum = np.zeros(len(freqs))\n\n for i, freq in enumerate(freqs):\n arg = 2.0 * np.pi * freq * time\n powerSpectrum[i] = np.sum(signal * np.cos(arg))**2 + np.sum(signal * np.sin(arg))**2\n\n powerSpectrum = powerSpectrum * 4.0 / len(time)**2\n return(powerSpectrum)", "def estimateKappaQuad(self,powerTT=None,callback=\"camb_dimensionless\",noise_keys=None,lmax=3500,filtering=None):\n\n\t\t#CMB lensing routines\n\t\tqlens = Lens()\n\n\t\t#Compute Phi FFT, take the laplacian\n\t\tphifft = self.estimatePhiFFTQuad(powerTT,callback,noise_keys,lmax,filtering)\n\t\tkappafft = phifft*0.5*qlens._cache[\"ell2\"]\n\n\t\t#Invert the FFT\n\t\tkappa = fftengine.ifft2(kappafft)\n\n\t\t#Return\n\t\treturn ConvergenceMap(kappa.real,angle=self.side_angle)", "def waveform_2_stft(waveform, frame_length=512, frame_step=128, n_mel_bins=None,\n mel_lower_hertz_edge=0.0, mel_upper_hertz_edge=8000.0):\n\n if len(waveform.shape) == 1:\n waveform = tf.expand_dims(waveform, 0)\n\n stft = tf.signal.stft(\n waveform, frame_length=frame_length, frame_step=frame_step,\n pad_end=True, window_fn=WINDOW_FN\n )\n\n # Truncate the nyquist frequency, commonly done in other papers,\n # also makes computation easier.\n real = tf.math.real(stft)[:, :, 0:-1]\n img = tf.math.imag(stft)[:, :, 0:-1]\n\n if n_mel_bins:\n real = _linear_to_mel_scale(\n real, n_mel_bins, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n img = _linear_to_mel_scale(\n img, n_mel_bins, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n\n return tf.concat([tf.expand_dims(real, 3),\n tf.expand_dims(img, 3)], axis=-1)", "def frequencyEstimator(ctd, ladcp, bathy, rho_neutral, strain,\\\n wl_min=100, wl_max=500, full_set=False):\n \n U, V, p_ladcp = oc.loadLADCP(ladcp)\n S, T, p_ctd, lat, lon = oc.loadCTD(ctd)\n \n \n Ek, Ep, Etotal, eta_power,\\\n Upow, Vpow, UVkx, eta_kx,\\\n N2mean, wl_min, wl_max,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec =\\\n internal_wave_energy(ctd, ladcp,\\\n rho_neutral,\\\n bathy, strain, wl_min=wl_min, wl_max=wl_max)\n \n eta_power_export = np.vstack(eta_power)\n eta_kx_export = np.vstack(eta_kx)\n Up_export = np.vstack(Upow)\n Vp_export = np.vstack(Vpow)\n UVkx_export = np.vstack(UVkx)\n \n\n np.savetxt('eta_power.csv',eta_power_export)\n np.savetxt('eta_kx.csv',eta_kx_export)\n np.savetxt('Upow.csv',Up_export)\n np.savetxt('Vpow.csv',Vp_export)\n np.savetxt('UVkx.csv',UVkx_export)\n\n\n \n \n # look for wavenumber maxes\n \n \n # Use ratios to solve for internal frequncys\n f = np.nanmean(gsw.f(lat))\n \n omega = f*np.sqrt(Etotal/(Ek-Ep))\n\n m = np.mean((wl_min, wl_max))\n m = (2*np.pi)/m\n kh = 
(m/np.sqrt(np.abs(N2mean)))*(np.sqrt(omega**2 - f**2))\n mask = kh == 0\n kh[mask]= np.nan\n lambdaH = 1e-3*(2*np.pi)/kh\n \n # get mean spectra\\\n \n eta_mean = []\n for station in eta_power:\n eta_mean.append(np.nanmean(station, axis=0))\n \n eta_mean = np.vstack(eta_mean).T\n \n \n aspect = kh/m \n \n file2save = pd.DataFrame(lambdaH)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('lambdaH.xlsx')\n file2save = pd.DataFrame(kh)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('Kh.xlsx')\n file2save = pd.DataFrame(omega)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('omega.xlsx')\n file2save = pd.DataFrame(aspect)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('aspect.xlsx')\n \n np.savetxt('eta_mean.csv', eta_mean)\n \n \n np.savetxt('kh.csv', kh)\n np.savetxt('lamdah.csv', lambdaH)\n np.savetxt('omega.csv', omega)\n \n if full_set:\n return lambdaH, kh, omega, N2mean,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec, aspect\n \n else:\n return lambdaH, kh, omega, N2mean", "def element_area_and_temperature(freq_hz):\n # Element noise data.\n noise_data = {\n 'freqs': [0.05e9, 0.07e9, 0.11e9, 0.17e9, 0.25e9, 0.35e9,\n 0.45e9, 0.55e9, 0.65e9],\n 'a_eff': [1.8791, 1.8791, 1.8694, 1.3193, 0.6080, 0.2956,\n 0.2046, 0.1384, 0.0792],\n 't_sys': [4.0409e3, 1.5029e3, 0.6676e3, 0.2936e3, 0.1402e3, 0.0873e3,\n 0.0689e3, 0.0607e3, 0.0613e3]\n }\n log_freq = numpy.log10(freq_hz)\n freqs = numpy.array(noise_data['freqs'])\n a_eff = numpy.array(noise_data['a_eff'])\n t_sys = numpy.array(noise_data['t_sys'])\n f_cut = 2\n\n # Interpolate to get effective area.\n if freq_hz <= freqs[f_cut]:\n f = scipy.interpolate.interp1d(numpy.log10(freqs[:f_cut+1]), \n numpy.log10(a_eff[:f_cut+1]), kind='slinear')\n a_eff = 10**f(log_freq)\n else:\n f = scipy.interpolate.interp1d(numpy.log10(freqs[f_cut:]), \n numpy.log10(a_eff[f_cut:]), kind='cubic')\n a_eff = 10**f(log_freq)\n\n # Interpolate to get system temperature.\n f = scipy.interpolate.interp1d(numpy.log10(freqs), \n numpy.log10(t_sys), kind='cubic')\n t_sys = 10**f(log_freq)\n return a_eff, t_sys", "def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl", "def speed_of_sound(altitude):\n t = temperature(altitude) # R\n a = sqrt(gamma*gas_constant*t) # [ft/s]\n return a", "def galaxy():\n rot_ang = 1\n pol_ang = 1\n\n\n time_array = [datetime.datetime(2017, 5, 25, 2, 0),\n datetime.datetime(2017, 5, 26, 7, 0),\n #~ datetime.datetime(2017, 5, 28, 1, 0),\n #~ datetime.datetime(2017, 5, 30, 8, 0),\n datetime.datetime(2017, 6, 4, 2, 0)]\n\n lfdic = 
{1:{'name':'LI', 'lat':[26,33,19.676], 'long':[97,26,31.174], 't_offset':6.496132851851852},\n 2:{'name':'LII', 'lat':[34,4,43.497], 'long':[107,37,5.819], 't_offset':7.174552203703703},\n 3:{'name':'LIII', 'lat':[38,25,59.0], 'long':[79,50,23.0], 't_offset':5.322648148148148},\n 4:{'name':'LIV', 'lat':[34,12,3.0], 'long':[118,10,18.0], 't_offset':7.87811111111111}}\n lfs = lfdic[4]\n long_radians = (lfs['long'][0] + lfs['long'][1]/60.0 + lfs['long'][2]/3600.0)*np.pi/180.0\n\n LoFASM = station(lfs['name'],lfs['lat'],lfs['long'],FOV_color='b',\n time='',frequency=20.0,one_ring='inner',\n rot_angle=rot_ang,pol_angle=pol_ang)\n innerNS_FOV = 0.61975795698554226 #LoFASM.lofasm.Omega()\n inner_conversion_NS = np.divide((np.power(np.divide(3.0*1.0e8,45.0e6),2)),(innerNS_FOV))\n\n print('Stage 1/2 Done.')\n\n powe = np.multiply(LoFASM.calculate_gpowervslstarray(time_array),inner_conversion_NS)\n power = 10*np.log10(np.array(powe))\n print('Stage 2/2 Done.')\n\n return power", "def get_features(y, sr, n_fft=1024, hop_length=512):\n\n # Selected features:\n features = {'centroid': None, 'roloff': None, 'flux': None, 'rmse': None, 'zcr': None, 'chroma': None}\n \n # Using librosa to calculate the features\n features['centroid'] = librosa.feature.spectral_centroid(y, sr=sr, n_fft=n_fft, hop_length=hop_length).ravel()\n features['roloff'] = librosa.feature.spectral_rolloff(y, sr=sr, n_fft=n_fft, hop_length=hop_length).ravel()\n features['zcr'] = librosa.feature.zero_crossing_rate(y, frame_length=n_fft, hop_length=hop_length).ravel()\n features['rmse'] = librosa.feature.rms(y, frame_length=n_fft, hop_length=hop_length).ravel()\n features['flux'] = librosa.onset.onset_strength(y=y, sr=sr).ravel()\n features['chroma'] = librosa.feature.chroma_stft(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length).ravel()\n \n # Treatment of MFCC feature\n mfcc = librosa.feature.mfcc(y, n_fft=n_fft, hop_length=hop_length, n_mfcc=13)\n for idx, v_mfcc in enumerate(mfcc):\n features[f'mfcc_{idx}'] = v_mfcc.ravel()\n \n # Calculate statistics for each feature:\n def get_moments(descriptors):\n result = {}\n for k, v in descriptors.items():\n result[f'{k}_mean'] = np.mean(v)\n result[f'{k}_std'] = np.std(v)\n result[f'{k}_kurtosis'] = kurtosis(v)\n result[f'{k}_skew'] = skew(v)\n return result\n \n dict_agg_features = get_moments(features)\n \n # Calculating one more feature:\n dict_agg_features['tempo'] = librosa.beat.tempo(y, sr=sr)[0]\n \n return dict_agg_features", "def getSpectrumKurucz(teff=None, logg=None, mstar=None, lstar=None, rstar=None, modeldir=None, wav=None):\n\n #\n # Sanity check\n #\n if teff is None:\n raise ValueError('Unknown teff. Kurucz spectrum cannot be calculated without the stellar effective'\n + 'temperature.')\n if logg is None:\n if mstar is None:\n raise ValueError('Unknown logg and mstar. For Kurucz atmosphere models either logg or mstar and '\n + ' rstar/lstar should be specified')\n else:\n if rstar is None:\n if lstar is None:\n raise ValueError('Unknown logg, rstar, lstar thus logg cannot be calculated. For Kurucz '\n + 'atmosphere models either logg should be given or a combination of mstar, lstar, rstar '\n + 'should be specified from which logg can be calculated for a given effective temperature. ')\n else:\n rstar = np.sqrt(lstar / (4. * np.pi * nc.ss * teff**4.))\n else:\n lstar = 4. 
* np.pi * rstar**2 * nc.ss * teff**4\n\n logg = np.log10(nc.gg * mstar / rstar**2)\n else:\n if rstar is None:\n if lstar is None:\n raise ValueError('Unknown rstar and lstar thus luminosity cannot be calculated. For Kurucz '\n + 'atmosphere models either lstar or rstar should be given')\n else:\n rstar = np.sqrt(lstar / (4. * np.pi * nc.ss * teff**4.))\n else:\n lstar = 4. * np.pi * rstar**2 * nc.ss * teff**4\n\n mstar = 10.**logg * rstar**2 / nc.gg\n\n assert (teff >= 0.), 'Negative effective temperature!'\n assert (mstar >= 0.), 'Negative stellar mass!'\n assert (lstar >= 0.), 'Negative stellar luminosity!'\n assert (rstar >= 0.), 'Negative stellar radius!'\n\n print('-------------------------------------------')\n print('Interpolating in Kurucz model atmospheres')\n print('Stellar parameters: ')\n print('-------------------------------------------')\n print('Teff [K] : ', teff)\n print('Radius [Rsun] : ', rstar / nc.rs)\n print('Luminosity [Lsun] : ', lstar / nc.ls)\n print('Mass [Msun] : ', mstar / nc.ms)\n print('logg : ', logg)\n print('-------------------------------------------')\n\n dum = readKuruczGrid(fname=modeldir + '/fp00k2.pck')\n #\n # Bracket in Teff\n #\n teff_grid = np.unique(dum['teff'])\n\n ii = abs(teff_grid - teff).argmin()\n idt1 = ii\n if teff_grid[ii] > teff:\n idt1 = ii - 1\n else:\n idt1 = ii\n idt2 = idt1 + 1\n\n #\n # Bracket in Logg\n #\n ii = (dum['teff'] == teff_grid[idt1])\n logg_grid_lower = dum['logg'][ii]\n ii = (dum['teff'] == teff_grid[idt2])\n logg_grid_upper = dum['logg'][ii]\n\n ii = abs(logg_grid_lower - logg).argmin()\n if logg < logg_grid_lower[0]:\n idg1 = -1\n idg2 = 0\n elif logg > logg_grid_lower[-1]:\n idg1 = logg_grid_lower.shape[0] - 1\n idg2 = -1\n else:\n idg1 = ii\n if logg_grid_lower[ii] > logg:\n idg1 = ii - 1\n else:\n idg1 = ii\n idg2 = idg1 + 1\n\n idgl1 = idg1\n idgl2 = idg2\n\n ii = abs(logg_grid_upper - logg).argmin()\n if logg < logg_grid_upper[0]:\n idg1 = -1\n idg2 = 0\n elif logg > logg_grid_upper[-1]:\n idg1 = logg_grid_upper.shape[0] - 1\n idg2 = -1\n else:\n idg1 = ii\n if logg_grid_upper[ii] > logg:\n idg1 = ii - 1\n else:\n idg1 = ii\n idg2 = idg1 + 1\n\n idgu1 = idg1\n idgu2 = idg2\n\n #\n # Check if we need to do a 3point bilinear interpolation\n #\n if ((idgl1 < 0) | (idgl2 < 0) | (idgu1 < 0) | (idgu2 < 0)):\n x = []\n y = []\n sp = []\n spc = []\n if idgl1 >= 0:\n x.append(teff_grid[idt1])\n y.append(logg_grid_lower[idgl1])\n ii = ((dum['teff'] == teff_grid[idt1]) & (dum['logg'] == logg_grid_lower[idgl1]))\n sp.append(np.squeeze(dum['inu'][ii, :]))\n spc.append(np.squeeze(dum['inucont'][ii, :]))\n if idgl2 >= 0:\n x.append(teff_grid[idt1])\n y.append(logg_grid_lower[idgl2])\n ii = ((dum['teff'] == teff_grid[idt1]) & (dum['logg'] == logg_grid_lower[idgl2]))\n sp.append(np.squeeze(dum['inu'][ii, :]))\n spc.append(np.squeeze(dum['inucont'][ii, :]))\n if idgu1 >= 0:\n x.append(teff_grid[idt2])\n y.append(logg_grid_upper[idgu1])\n ii = ((dum['teff'] == teff_grid[idt2]) & (dum['logg'] == logg_grid_upper[idgu1]))\n sp.append(np.squeeze(dum['inu'][ii, :]))\n spc.append(np.squeeze(dum['inucont'][ii, :]))\n if idgu2 >= 0:\n x.append(teff_grid[idt2])\n y.append(logg_grid_upper[idgu2])\n ii = ((dum['teff'] == teff_grid[idt2]) & (dum['logg'] == logg_grid_upper[idgu2]))\n sp.append(np.squeeze(dum['inu'][ii, :]))\n spc.append(np.squeeze(dum['inucont'][ii, :]))\n\n if len(x) != 3:\n msg = 'Something went wrong in the interpolation of the stellar atmosphere models..\\n ' \\\n + ' for a 3 point bilinear interpolation ' + (\"%d\" % 
len(x))+' indices were found.'\n raise ValueError(msg)\n\n else:\n print('Bracketed spectrum with Teff : ', teff, ' and logg : ', logg)\n print('Teff grid : ', x)\n print('Logg grid : ', y)\n\n c1 = ((y[1] - y[2]) * (teff - x[2]) + (x[2] - x[1]) * (logg - y[2])) / (\n (y[1] - y[2]) * (x[0] - x[2]) + (x[2] - x[1]) * (y[0] - y[2]))\n c2 = ((y[2] - y[0]) * (teff - x[2]) + (x[0] - x[2]) * (logg - y[2])) / (\n (y[1] - y[2]) * (x[0] - x[2]) + (x[2] - x[1]) * (y[0] - y[2]))\n c3 = 1. - c1 - c2\n\n lnu = c1 * sp[0] + c2 * sp[1] + c3 * sp[2]\n lnucont = c1 * spc[0] + c2 * spc[1] + c3 * spc[2]\n\n\n else:\n print('Bracketed spectrum with Teff : ', teff, ' and logg : ', logg)\n print('Teff grid : ', teff_grid[idt1], teff_grid[idt2])\n print('Logg grid : ', logg_grid_lower[idgl1], logg_grid_lower[idgl2])\n #\n # Do the standard four point bilinear interpolation\n #\n ii = ((dum['teff'] == teff_grid[idt1]) & (dum['logg'] == logg_grid_lower[idgl1]))\n sp11 = np.squeeze(dum['inu'][ii, :])\n ii = ((dum['teff'] == teff_grid[idt1]) & (dum['logg'] == logg_grid_lower[idgl2]))\n sp12 = np.squeeze(dum['inu'][ii, :])\n ii = ((dum['teff'] == teff_grid[idt2]) & (dum['logg'] == logg_grid_upper[idgu1]))\n sp22 = np.squeeze(dum['inu'][ii, :])\n ii = ((dum['teff'] == teff_grid[idt2]) & (dum['logg'] == logg_grid_upper[idgu2]))\n sp21 = np.squeeze(dum['inu'][ii, :])\n\n c11 = (teff_grid[idt2] - teff) * (logg_grid_upper[idgu2] - logg)\n c12 = (teff_grid[idt2] - teff) * (logg - logg_grid_upper[idgu1])\n c22 = (teff - teff_grid[idt1]) * (logg - logg_grid_lower[idgl1])\n c21 = (teff - teff_grid[idt1]) * (logg_grid_lower[idgl2] - logg)\n c00 = 1. / ((teff_grid[idt2] - teff_grid[idt1]) * (logg_grid_lower[idgl2] - logg_grid_lower[idgl1]))\n\n lnu = c00 * (c11 * sp11 + c12 * sp12 + c22 * sp22 + c21 * sp21)\n lnucont = c00 * (c11 * sp11 + c12 * sp12 + c22 * sp22 + c21 * sp21)\n\n nu = nc.cc / dum['wav'] * 1e4\n lum = (0.5 * abs(nu[1:] - nu[:-1]) * (lnu[1:] + lnu[:-1])).sum()\n lnu *= lstar / lum\n lnucont *= lstar / lum\n\n return {'wav': dum['wav'], 'lnu': lnu, 'lnucont': lnucont}", "def get_features(file, song_index=0):\n\n chroma = get_chroma(file, song_index)\n timbre = get_timbre(file, song_index)\n max_loudness = get_max_loudness(file, song_index)\n\n # normalize to get ~ 0-1\n timbre = (timbre + 1000) / 1200\n max_loudness = (max_loudness + 70) / 70\n max_loudness = max_loudness.reshape(-1, 1)\n features = np.hstack([timbre, chroma, max_loudness])\n return features", "def haurwitz(zenith):\n\n # GHI = 1098 * cos(z) * exp(-0.057 / cos(z))\n clearsky_ghi = 1098.0 * np.cos(np.radians(zenith)) * np.exp(-0.057 / np.cos(np.radians(zenith)))\n\n # remove negative values\n clearsky_ghi[clearsky_ghi < 0] = 0\n\n return clearsky_ghi", "def gaussianFilter(gain,BT,spSym,nTaps):\n\n a = np.sqrt(np.log(2)/2)/BT\n t = np.linspace(-.5*nTaps,.5*nTaps-1,nTaps)/spSym\n\n ft = np.sqrt(np.pi)/a *np.exp(-(np.pi**2*(t)**2)/a**2)\n ft /= np.sum(ft) * gain # normalize filter\n\n return ft", "def gassmann(Kdry, Ks, Kf, phi):\r\n\r\n Ksat = np.zeros(len(phi))\r\n Ksat[0] = Ks\r\n Ksat [1:]= Ks * (phi[1:] * Kdry[1:] - ((1 + phi[1:]) * Kf * Kdry[1:] /Ks) + Kf) / ( (1-phi[1:]) * Kf + phi[1:] * Ks - Kf * Kdry[1:] / Ks)\r\n\r\n return Ksat", "def get_allsky(self):\n band = self.get_band()\n septon = self.is_septon()\n if band == '10_90' or band == '30_90' or septon:\n allsky = True\n else:\n allsky = False\n return allsky", "def gga_freq_abs(x, sample_rate, freq):\n # TODO: This is slow. 
Any way to improve it?\n lx = _BLOCK_SIZE\n pik_term = 2 * _PI * freq / sample_rate\n cos_pik_term = tf.cos(pik_term)\n cos_pik_term2 = 2 * cos_pik_term\n\n # TODO: Maybe if we make these states into proper variables and assign to them,\n # we will use less memory.\n # Use tf.zeros because zeros_initializer doesn't seem to work in tf 1.0.\n\n # number of iterations is (by one) less than the length of signal\n # Pipeline the first two iterations.\n s1 = tf.tile(x[:, 0, None], (1, _NUM_BANDS))\n s0 = x[:, 1, None] + cos_pik_term2 * s1\n s2 = s1\n s1 = s0\n\n def cond(ind, *_):\n return ind < lx - 1\n\n def body(ind, s):\n s1 = s[0]\n s2 = s[1]\n s0 = x[:, ind, None] + cos_pik_term2 * s1 - s2\n return (ind + 1, (s0, s1))\n\n _, s = tf.while_loop(\n cond,\n body,\n loop_vars=(tf.constant(2), (s1, s2)),\n parallel_iterations=1)\n s1 = s[0]\n s2 = s[1]\n\n s0 = x[:, lx - 1, None] + cos_pik_term2 * s1 - s2\n\n # TODO: Figure out why this doesn't work.\n # | s0 - s1 exp(-ip) |\n # | s0 - s1 cos(p) + i s1 sin(p)) |\n # sqrt((s0 - s1 cos(p))^2 + (s1 sin(p))^2)\n # sqrt(s0^2 - 2 s0 s1 cos(p) + s1^2 cos^2(p) + s1^2 sin^2(p))\n # sqrt(s0^2 + s1^2 - 2 s0 s1 cos(p))\n\n # y = stable_sqrt(s0**2 + s1**2 - s0*s1*cos_pik_term2)\n y = stable_sqrt((s0 - s1*cos_pik_term)**2 + (s1 * tf.sin(pik_term))**2)\n return y", "def test_3d_steam_freq():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/full3D.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n lowmem_write_readback(dic,data)", "def temporal_filter(fc, L, srt, PASS = [2,3]):\n if issubclass(type(fc), str):\n fc = float(fc)\n t = np.arange(L).astype(np.float32)/srt\n k = t*fc\n h = np.sin(2*np.pi*k) * k**2 * np.exp(-3.5*k) * fc\n\n h = h-np.mean(h)\n H0 = np.fft.fft(h, n=2*L)\n A = np.angle(H0[0:L])\n H = np.abs(H0[0:L])\n maxi = np.argmax(H)\n H = H / (H[maxi] or 1)\n\n # passband\n if PASS[0] == 1:\n #low pass\n H[0:maxi] = 1\n elif PASS[0] == PASS[1]:\n #high pass\n H[maxi+1:L] = 1\n\n H = H * np.exp(1j*A)\n return H", "def get_kt(temps, delta_gibbs_ts):\n # rate coefficient from Eyring equation\n return KB / H * temps * np.exp(-delta_gibbs_ts / RG / temps) # [1/s] if unimolecular", "def stft(x, fs, framesz, hop):\n framesamp = int(framesz*fs)\n hopsamp = int(hop*fs)\n w = scipy.hamming(framesamp)\n X = scipy.array([scipy.fft(w*x[i:i+framesamp]) \n for i in range(0, len(x)-framesamp, hopsamp)])\n return X", "def derive_Fritz11(wavelength):\n # Extinction law definition\n wave = np.array([1.282, 1.736, 2.166, 2.625, 2.758, 2.873, 3.039, 3.297, 3.74, 3.819, 3.907, 4.052,\n 4.376, 5.128, 5.908, 6.772, 7.459, 7.502, 8.76, 12.371, 19.062])\n A_AKs = np.array([7.91, 4.30, 2.49, 1.83, 1.51, 1.84, 2.07, 1.66, 1.19, 1.19, 1.09, 1.01, 1.09, 0.99,\n 1.04, 0.84, 0.81, 0.79, 2.04, 1.34, 1.34])\n\n\n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # We'll call 2.14 microns the K-band\n idx = np.where( abs(wavelength - 2.14) == min(abs(wavelength - 2.14)) )\n A_AKs_at_wave = A_at_wave / A_at_wave[idx] \n\n return A_AKs_at_wave", "def makespectfile(afile):\n x = []\n y = []\n with open(afile) as f:\n for line in f:\n if line.startswith('#'): continue\n (freq,flux) = line.split()\n x.append(float(freq))\n 
y.append(float(flux))\n return (np.asarray(x),np.asarray(y))", "def intensity(self, crystal):\n fp = os.path.join(os.path.dirname(__file__), \"database/atomic_scattering_params.json\")\n with open(fp, 'r') as f:\n ATOMIC_SCATTERING_PARAMS = json.load(f)\n\n d0 = (1/2/self.d_hkl)**2\n\n # obtiain scattering parameters, atomic numbers, and occus (need to look into occus)\n coeffs = []\n zs = []\n\n for elem in crystal.get_chemical_symbols():\n if elem == 'D':\n elem = 'H'\n c = ATOMIC_SCATTERING_PARAMS[elem]\n z = Element(elem).z\n coeffs.append(c)\n zs.append(z) \n\n coeffs = np.array(coeffs)\n self.peaks = {}\n two_thetas = []\n\n # self.march_parameter = 1\n TWO_THETA_TOL = 1e-5 # tolerance to find repeating angles\n SCALED_INTENSITY_TOL = 1e-5 # threshold for intensities\n \n\n for hkl, s2, theta, d_hkl in zip(self.hkl_list, d0, self.theta, self.d_hkl):\n \n # calculate the scattering factor sf\n g_dot_r = np.dot(crystal.get_scaled_positions(), np.transpose([hkl])).T[0]\n sf = zs - 41.78214 * s2 * np.sum(coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)\n \n # calculate the structure factor f\n f = np.sum(sf * np.exp(2j * np.pi * g_dot_r))\n \n # calculate the lorentz polarization factor lf\n lf = (1 + np.cos(2 * theta) ** 2) / (np.sin(theta) ** 2 * np.cos(theta))\n\n # calculate the preferred orientation factor\n if self.preferred_orientation != False:\n G = self.march_parameter\n po = ((G * np.cos(theta))**2 + 1/G * np.sin(theta)**2)**(-3/2) \n else:\n po = 1\n \n # calculate the intensity I\n I = (f * f.conjugate()).real\n \n # calculate 2*theta\n two_theta = np.degrees(2 * theta)\n \n # find where the scattered angles are equal\n ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) < TWO_THETA_TOL)\n\n # append intensity, hkl plane, and thetas to lists\n if len(ind[0]) > 0:\n self.peaks[two_thetas[ind[0][0]]][0] += I * lf * po\n self.peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))\n else:\n self.peaks[two_theta] = [I * lf * po, [tuple(hkl)],d_hkl]\n two_thetas.append(two_theta)\n\n # obtain important intensities (defined by SCALED_INTENSITY_TOL)\n # and corresponding 2*theta, hkl plane + multiplicity, and d_hkl\n # print(peaks.keys())\n max_intensity = max([v[0] for v in self.peaks.values()])\n x = []\n y = []\n hkls = []\n d_hkls = []\n count = 0 \n for k in sorted(self.peaks.keys()):\n count +=1\n v = self.peaks[k]\n fam = self.get_unique_families(v[1])\n if v[0] / max_intensity * 100 > SCALED_INTENSITY_TOL:\n x.append(k)\n y.append(v[0])\n \n hkls.append([{\"hkl\": hkl, \"multiplicity\": mult}\n for hkl, mult in fam.items()])\n d_hkls.append(v[2])\n\n self.theta2 = x\n self.xrd_intensity = y\n self.hkl_labels = hkls\n self.d_hkls = d_hkls", "def speedOfSound(gamma, R, T):\n\n a = np.sqrt(gamma*R*T)\n\n return a", "def sine_wave_generator(fs, t, spl_value, freq):\r\n\r\n # \"Peak\" value in Pascals (amplitude)\r\n p_ref = 2e-5\r\n pressure_rms = p_ref * (10.00 ** (spl_value / 20.00))\r\n\r\n # Sample range\r\n # samples = np.linspace(0, t, int(fs * t), endpoint=False)\r\n time = np.arange(0, t, 1 / fs)\r\n\r\n # Theta lets you specify the sine wave value at time 0\r\n theta = 0\r\n\r\n # Amplitude of the signal\r\n amplitude = np.sqrt(2) * pressure_rms\r\n\r\n # Signal calculation\r\n signal = np.array(amplitude * np.sin((2.00 * math.pi * freq * time) + theta))\r\n\r\n return signal, time", "def gyroHF(self, GYRO, PFC):\n print(\"Calculating gyro orbit heat loads\")\n log.info(\"Calculating gyro orbit heat loads\")\n #get divertor HF\n qDiv = 
PFC.qDiv[PFC.PFC_GYROmap] / self.elecFrac\n Pdiv = qDiv * PFC.areas[PFC.PFC_GYROmap]\n #Get fractional multipliers for each helical trace\n gyroFrac = 1.0/GYRO.N_gyroPhase\n vPhaseFrac = 1.0/GYRO.N_vPhase\n vSliceFrac = GYRO.energyFracs\n #qMatrix = np.zeros((GYRO.N_gyroPhase,GYRO.N_vPhase,GYRO.N_vSlice,len(q)))\n Pgyro = np.zeros((GYRO.Nt))\n PNaN = 0.0\n sum=0\n sum1=0\n #loop through intersect record and redistribute power using multipliers\n for gyroPhase in range(GYRO.N_gyroPhase):\n for vPhase in range(GYRO.N_vPhase):\n for vSlice in range(GYRO.N_vSlice):\n idx = GYRO.intersectRecord[gyroPhase,vPhase,vSlice,PFC.CADHOT_GYROmap]\n isNanFrom = np.where(np.isnan(idx)==True)[0] #include NaNs (NaNs = no intersection) index we map from\n notNanFrom = np.where(np.isnan(idx)==False)[0] #dont include NaNs (NaNs = no intersection) index we map from\n notNanTo = idx[~np.isnan(idx)] #indices we map power to\n notNanTo = notNanTo.astype(int) #cast as integer\n isNanTo = idx[np.isnan(idx)] #indices we map power to\n isNanTo = isNanTo.astype(int) #cast as integer\n\n if len(notNanFrom)>0:\n #multiple Froms can light up the same To, so we loop\n for i in range(len(notNanFrom)):\n Pgyro[notNanTo[i]] += Pdiv[notNanFrom[i]]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[notNanFrom[i],vSlice]\n\n if len(isNanFrom)>0:\n PNaN += np.sum(Pdiv[isNanFrom]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[isNanFrom,vSlice])\n\n #print(\"\\nTEST2\")\n #print(GYRO.intersectRecord[0,0,0,1711])\n #print(Pgyro[1711])\n\n GYRO.gyroPowMatrix += Pgyro\n GYRO.gyroNanPower += PNaN\n return", "def cloud_map(sky):\n cloud_map = {\n 'NSC': 0,\n 'NCD': 0,\n 'CLR': 0,\n 'FEW': 2,\n 'SCT': 6,\n 'BKN': 8,\n 'OVC': 10\n }\n return list(map(lambda s: (cloud_map[s[0]], s[1].value() if s[1] else 0), sky))", "def extract_heartrate_tsfresh(transformed: np.ndarray) -> np.ndarray:\n ecg_features = []\n print(\"Extracting TSFRESH statistics from heart rate signals...\")\n\n for x in tqdm(transformed):\n vchange_quantiles_abs = change_quantiles(x[:, -1], 0, 0.8, True, \"var\")\n vfft_aggregated_k = list(fft_aggregated(x[:, -1], [{\"aggtype\": \"kurtosis\"}]))[0][1]\n vmean_abs_change = mean_abs_change(x[:, -1])\n vabsolute_sum_of_changes = absolute_sum_of_changes(x[:, -1])\n vfft_aggregated_s = list(fft_aggregated(x[:, -1], [{\"aggtype\": \"skew\"}]))[0][1]\n vfft_aggregated_c = list(fft_aggregated(x[:, -1], [{\"aggtype\": \"centroid\"}]))[0][1]\n vvariance = variance(x[:, -1])\n vvariation_coefficient = variation_coefficient(x[:, -1])\n\n new_tsfresh = np.array(\n [\n vchange_quantiles_abs,\n vfft_aggregated_k,\n vmean_abs_change,\n vabsolute_sum_of_changes,\n vfft_aggregated_s,\n vfft_aggregated_c,\n vvariance,\n vvariation_coefficient,\n ]\n )\n\n ecg_features.append(np.concatenate(new_tsfresh, axis=0))\n\n return np.array(ecg_features)", "def test_3d_steam_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/full3D.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n write_readback(dic,data)", "def _fog_by_index(\n self, index: int, val: Optional[Tuple[int, float]]\n ) -> Tuple[int, float]:\n if index < 0 or index >= 26 * 21:\n raise IndexError(\"invalid fog index\")\n\n colours = self.variables.setdefault(\"fog_trigger\", VariableArray(VariableUInt))\n if not isinstance(colours, 
VariableArray):\n raise ValueError(\"fog_trigger variable not an array\")\n pers = self.variables.setdefault(\"fog_per\", VariableArray(VariableFloat))\n if not isinstance(pers, VariableArray):\n raise ValueError(\"fog_per variable not an array\")\n\n while index >= len(colours):\n colours.append(0x111118)\n while index >= len(pers):\n pers.append(0.0)\n\n result = (colours[index], pers[index])\n if val is not None:\n colours[index], pers[index] = val\n return result", "def stft(x, fs, framesz, hop):\n framesamp = int(framesz*fs)\n hopsamp = int(hop*fs)\n w = scipy.hamming(framesamp)\n X = scipy.array([scipy.fft(w*x[i:i+framesamp],256)\n for i in range(0, len(x)-framesamp, hopsamp)])\n X=X[:,0:128]\n return X" ]
[ "0.7263583", "0.55189", "0.5414309", "0.52412045", "0.5169021", "0.51627105", "0.51417047", "0.51059294", "0.5028249", "0.49997443", "0.49332586", "0.4930508", "0.49028358", "0.48790222", "0.48755518", "0.4864208", "0.4823804", "0.4810659", "0.4780731", "0.4776312", "0.47525737", "0.47469786", "0.46991336", "0.46919245", "0.467459", "0.46471933", "0.4637196", "0.4629898", "0.4619782", "0.46104607", "0.4598555", "0.4592912", "0.45928133", "0.4584871", "0.45781875", "0.45750624", "0.4571609", "0.4564531", "0.45631036", "0.4552259", "0.4535978", "0.451747", "0.45142245", "0.4513293", "0.4508917", "0.45076734", "0.44903588", "0.44898343", "0.44865003", "0.44835967", "0.44813004", "0.44812816", "0.44787747", "0.44745538", "0.44741926", "0.44740832", "0.44631377", "0.4453473", "0.4452899", "0.44518942", "0.4449467", "0.44462183", "0.44447076", "0.4440298", "0.4429219", "0.44283435", "0.4427696", "0.44205365", "0.4413398", "0.44116867", "0.44014966", "0.4401219", "0.43966615", "0.4393864", "0.4389235", "0.4388291", "0.43869844", "0.43807518", "0.43797964", "0.43780613", "0.43755233", "0.4366843", "0.43637922", "0.43603718", "0.436", "0.43577966", "0.43502513", "0.4349084", "0.43441093", "0.4341495", "0.43411", "0.4338992", "0.43343738", "0.4332322", "0.43322086", "0.43242142", "0.43231973", "0.4320018", "0.43162763", "0.43128467" ]
0.82942164
0
Calculates the average sky background temperature for a given Galactic longitude (gl) and Galactic latitude (gb), between frequencies f1 and f2 (in MHz). Coordinates are in degrees. Assumes a spectral index of "index"; the default is 2.55. The return value is in K. If a frequency array 'freqs' is given, then the average Tsky is calculated for each frequency range f0-f1, f1-f2, ... in the array, and the returned value is a list of average Tsky values. The size of the returned array is one less than the size of freqs.
def tsky_range(gl, gb, f1, f2, index, freqs=None): # reading the table nsky=np.zeros((90, 180), dtype=float) for ii in xrange(90): for jj in xrange(180): pos=(ii*180+jj)*5 nsky[ii,jj]=float(haslam_table[pos:pos+5]) # Convert to standard l,b b = int(gb + 90.5) if b >= 180: b = 179 l = int(gl + 0.5) if gl >= 360: l = 0 l = int((l / 4)) if freqs == None: tot=0 for ii in xrange(101): freq = f1 + ii*(f2-f1)/100. tsky = 2.7 + nsky[l,b] * (freq/408.0)**(index) tot += tsky tot /= 100. return tot else: temps=[] for ff in xrange(1, len(freqs)): tot = 0 for ii in xrange(101): freq = freqs[ff-1] + ii*(freqs[ff]-freqs[ff-1])/100. tsky = 2.7 + nsky[l,b] * (freq/408.0)**(index) tot += tsky tot /= 100. temps.append(tot) return temps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tsky(gl, gb, freq, index, freqs=None):\n\n\t# reading the table\n\tnsky=np.zeros((90, 180), dtype=float)\n\tfor ii in xrange(90):\n\t\tfor jj in xrange(180):\n\t\t\tpos=(ii*180+jj)*5\n\t\t\tnsky[ii,jj]=float(haslam_table[pos:pos+5])\n\n\t# Convert to standard l,b\n\tb = int(gb + 90.5)\n\tif b >= 180: b = 179\n\tl = int(gl + 0.5)\n\tif gl >= 360: l = 0\n\tl = int((l / 4))\n\t\n\tif freqs == None:\n\t\ttsky = 2.7 + nsky[l,b] * (freq/408.0)**(index)\n\t\treturn tsky\n\telse:\n\t\ttemps=[]\n\t\tfor freq in freqs:\n\t\t\ttsky = 2.7 + nsky[l,b] * (freq/408.0)**(index)\n\t\t\ttemps.append(tsky)\n\t\treturn temps", "def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg", "def get_average_energy(audio, beats, begin, end):\n buffer = np.square(audio[int(beats[int(begin)]):int(beats[int(end)])])\n average = np.mean(buffer)\n return average", "def get_fluxes_within_mask(tpf, aper_mask, gaia_sources):\n assert tpf is not None\n assert aper_mask is not None\n assert gaia_sources is not None\n ra, dec = gaia_sources[[\"ra\", \"dec\"]].values.T\n pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)\n contour_points = measure.find_contours(aper_mask, level=0.1)[0]\n isinside = [\n is_point_inside_mask(contour_points, pix) for pix in pix_coords\n ]\n min_gmag = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].min()\n gamma = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].apply(\n lambda x: 10 ** (0.4 * (min_gmag - x))\n )\n return gamma", "def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. 
#else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl", "def avg_spike_frequency_abf(abf, epoch):\n p0 = abf.sweepEpochs.p1s[epoch]\n p1 = abf.sweepEpochs.p1s[epoch+1]\n t = abf.sweepX[p0:p1]\n V = abf.sweepY[p0:p1]\n return avg_spike_frequency(t, V)", "def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga", "def average_fft(ut: np.ndarray) -> np.ndarray:\n\n # We average over each row of ut.\n ut_average = np.average(ut, axis=0) # shape (262144,)\n\n return ut_average", "def compute_GS(GMtcs):\n\n GS = np.mean(GMtcs,axis=0) #average over voxels\n\n return GS", "def hotaverage( names):\n rs = radioastronomy.Spectrum() # create input and average structures\n nhot = 0\n\n avenames = names # create a list of files to average\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'HOT': # speed up by only looking at hot load files\n continue\n \n rs.read_spec_ast(filename)\n\n if rs.telel > 0: # only working with hot load, skip elevation > 0.\n continue\n\n avenames[nhot] = filename\n nhot = nhot + 1\n # end of for all files loop\n\n nhot, hot = average( avenames[0:nhot]) # now use generic program for averages\n if nhot < 1:\n print 'No hot load files; can not calibrate!'\n exit()\n\n return nhot, hot", "def gtgram(\n wave, fs, window_time, hop_time, channels, f_min, f_max=None, return_freqs=False\n):\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gtgram_strides(fs, window_time, hop_time, xe.shape[1])\n\n y = np.zeros((channels, ncols))\n\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n\n if return_freqs:\n cfs = centre_freqs(fs, channels, f_min, f_max)\n return cfs, y\n return y", "def Gamma_per_grain(ZZall, Gamma_a_Z, ZZ_fz, fdist, GG):\n\n # index in the ZZall array for the charges in ZZ_fz\n zi_down = np.where(ZZall == ZZ_fz[0])[0][0]# find the index of the ZZ_fz[0] in ZZall \n zi_up = np.where(ZZall == ZZ_fz[-1])[0][0]# find the index of the ZZ_fz[-1] in ZZall\n \n #Gamma_pe_a = np.sum(fz*Gamma_dotdot_scaled[zi_down:zi_up+1])\n Gamma_pe_a = np.sum(fdist*Gamma_a_Z[zi_down:zi_up+1])\n \n return Gamma_pe_a", "def trialAverage(dffTraceAllRoi, bgIndex):\n # each element in trialAvgAllRoi is an epoch\n # then each epoch has a single list\n # this list contains arrays of trial averages for every roi\n # if bg, instead of an array, it has NaN\n trialAvgAllRoi = {}\n for epoch in dffTraceAllRoi:\n trialAvgAllRoi[epoch] = []\n for roi in range(len(dffTraceAllRoi[epoch])):\n trialLengths = []\n # if Bg, append NaN to trialLengths\n # this way you dont distrupt 
bgIndex in the future\n if roi == bgIndex:\n trialLengths.append(numpy.nan)\n else:\n for trial in dffTraceAllRoi[epoch][roi]:\n trialLengths.append(len(trial))\n\n if trialLengths[0] is not numpy.nan:\n # real ROI case, not bg\n minTrialLen = min(trialLengths)\n trialFit = 0\n for trial in dffTraceAllRoi[epoch][roi]:\n trialFit += trial[:minTrialLen]\n # calculate the trial average for an roi\n trialAvg = trialFit/len(dffTraceAllRoi[epoch][roi])\n\n trialAvgAllRoi[epoch].append(trialAvg)\n\n elif trialLengths[0] is numpy.nan:\n # bgRoi case\n trialAvgAllRoi[epoch].append(numpy.nan)\n\n return trialAvgAllRoi", "def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux", "def analyze2(ys, freqs, ts):", "def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)", "def compute_ctf(freqs,rots,akv,cs,wgh,dfmid1f,dfmid2f,angastf,dscale,bfactor=None): \n av = akv * 1e3 # Convert kilovots to volts\n cs = cs * 1e7 # Convert spherical aberation from mm to A\n \n # wavelength of electrons\n elambda = 12.2643247 / n.sqrt(av + av**2 * 0.978466e-6)\n \n wgh1 = dscale*n.sqrt(1.0 - wgh**2)\n wgh2 = dscale*wgh\n\n ix = freqs[:,0]\n iy = freqs[:,1]\n freq_radius = n.sqrt(ix**2 + iy**2)\n\n angle = elambda*freq_radius\n angspt = n.arctan2(iy,ix)\n if rots is not None:\n angspt = n.mod(angspt.reshape((-1,1)) + rots.reshape((1,-1)),2.0*n.pi)\n angle = angle.reshape((-1,1)) \n c1 = 2.0*n.pi*angle**2/(2.0*elambda)\n c2 = -c1*cs*angle**2/2.0\n angdif = angspt - angastf\n ccos = n.cos(2.0*angdif)\n df = 0.5*(dfmid1f + dfmid2f + ccos*(dfmid1f-dfmid2f))\n chi = c1*df + c2\n\n ctf = -wgh1*n.sin(chi) - wgh2*n.cos(chi)\n \n if bfactor is not None:\n ctf *= envelope_function(freq_radius, bfactor)\n\n return n.require(ctf,dtype = freqs.dtype)", "def gps2tas(GS, TK, verbose=0):\n # confirm GS and TK are valid lengths:\n if 2 < len(GS) < 5:\n pass\n else:\n raise ValueError(\"GS must be a list of three or four items\")\n\n if 2 < len(TK) < 5:\n pass\n else:\n raise ValueError(\"TK must be a list of three or four items\")\n\n if len(GS) != len(TK):\n raise ValueError(\n \"The ground speed and track arrays must have the same number of elements.\"\n )\n\n if len(GS) == 3:\n result = gps2tas3(GS, TK, verbose)\n return result\n else:\n gs_data_sets, tk_data_sets, results = [], [], []\n\n gs_data_sets.append([GS[0], GS[1], GS[2]])\n gs_data_sets.append([GS[1], GS[2], GS[3]])\n gs_data_sets.append([GS[2], GS[3], GS[0]])\n gs_data_sets.append([GS[3], GS[0], GS[1]])\n\n tk_data_sets.append([TK[0], TK[1], TK[2]])\n tk_data_sets.append([TK[1], TK[2], TK[3]])\n tk_data_sets.append([TK[2], TK[3], TK[0]])\n tk_data_sets.append([TK[3], TK[0], TK[1]])\n\n for (gs, tk) in zip(gs_data_sets, tk_data_sets):\n results.append(gps2tas3(gs, tk, 2))\n\n ave_TAS = 0\n ave_wind_x = 0\n ave_wind_y = 0\n sum2_TAS = 0\n\n for item in results:\n ave_TAS += item[0]\n sum2_TAS += item[0] ** 2\n ave_wind_x += item[1][0] * 
M.sin(M.pi * item[1][1] / 180.0)\n ave_wind_y += item[1][0] * M.cos(M.pi * item[1][1] / 180.0)\n\n ave_TAS /= 4.0\n std_dev_TAS = M.sqrt((sum2_TAS - 4 * ave_TAS ** 2) / 3)\n ave_wind_x /= 4\n ave_wind_y /= 4.0\n ave_wind_speed = M.sqrt(ave_wind_x ** 2 + ave_wind_y ** 2)\n ave_wind_dir = (720.0 - (180.0 / M.pi * M.atan2(ave_wind_x, ave_wind_y))) % 360\n # return results\n\n if verbose == 0:\n return ave_TAS\n elif verbose == 1:\n return ave_TAS, std_dev_TAS\n elif verbose == 2:\n return (\n ave_TAS,\n std_dev_TAS,\n (\n (results[0][1][0], results[0][1][1]),\n (results[1][1][0], results[1][1][1]),\n (results[2][1][0], results[2][1][1]),\n (results[3][1][0], results[3][1][1]),\n ),\n )\n else:\n raise ValueError(\"The value of verbose must be equal to 0, 1 or 2\")", "def AllFreAverageV(self,):\n \t\tv_array = self.data\n \t\taaverage_v = np.average(v_array[:,1])\n \t\tprint('Whole frequency average group velocity:\\nVw=',aaverage_v/1000,'km/s')\n \t\treturn", "def average( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n asum = radioastronomy.Spectrum()\n nsum = 0\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if nsum == 0:\n asum = copy.deepcopy( rs)\n firstlon = rs.gallon\n asum.ydataA = rs.ydataA * rs.durationSec\n asum.gallat = rs.gallat * rs.durationSec\n asum.gallon = rs.gallon * rs.durationSec\n nsum = 1\n firstutc = rs.utc\n lastutc = rs.utc\n else:\n asum.ydataA = asum.ydataA + (rs.ydataA * rs.durationSec)\n asum.count = asum.count + rs.count\n asum.durationSec = asum.durationSec + rs.durationSec\n # fix wrap of longitudes\n if abs(rs.gallon - firstlon) > 180:\n crossZero = True\n if rs.gallon > firstlon:\n rs.gallon = rs.gallon - 360.\n else:\n rs.gallon = rs.gallon + 360.\n asum.gallon = asum.gallon + (rs.gallon * rs.durationSec)\n asum.gallat = asum.gallat + (rs.gallat * rs.durationSec)\n # keep track of observing time for weighted sum\n lastutc = rs.utc\n nsum = nsum + 1\n #end for all files loop\n\n if nsum < 1:\n print \"No acceptable files in average list\"\n else:\n asum.ydataA = asum.ydataA/float(asum.durationSec)\n asum.gallon = asum.gallon/float(asum.durationSec)\n asum.gallat = asum.gallat/float(asum.durationSec)\n aveutc,duration = radioastronomy.aveutcs( firstutc, lastutc)\n asum.utc = aveutc\n if (duration < 1.):\n print 'hotcold.average: very short average interval: ',duration\n return nsum, asum", "def calculate_boltzmann_average(energy, temperature, kb=0.0019872041):\n beta = 1 / (kb * temperature)\n F = np.array(energy)\n Ptot = np.exp(-F * beta)\n P = Ptot / Ptot.sum()\n F_avg = (P * F).sum()\n return F_avg", "def at_frequencies(\n self,\n freqs,\n inplace=True,\n freq_interp_kind=\"cubic\",\n nan_handling=\"clip\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n atol=None,\n ):\n sky = self if inplace else self.copy()\n\n if atol is None:\n atol = self.freq_tol\n\n if self.spectral_type == \"spectral_index\":\n sky.stokes = (\n self.stokes\n * (freqs[:, None].to(\"Hz\") / self.reference_frequency[None, :].to(\"Hz\"))\n ** self.spectral_index[None, :]\n )\n sky.reference_frequency = None\n elif self.spectral_type == \"full\":\n # Find a subset of the current array.\n ar0 = self.freq_array.to_value(\"Hz\")\n ar1 = freqs.to_value(\"Hz\")\n tol = atol.to_value(\"Hz\")\n matches = np.fromiter(\n (np.isclose(freq, ar1, atol=tol).any() for freq in ar0), dtype=bool\n )\n\n if np.sum(matches) != freqs.size:\n 
raise ValueError(\n \"Some requested frequencies are not present in the current SkyModel.\"\n )\n sky.stokes = self.stokes[:, matches, :]\n if sky.freq_edge_array is not None:\n sky.freq_edge_array = sky.freq_edge_array[:, matches]\n elif self.spectral_type == \"subband\":\n if np.max(freqs.to(\"Hz\")) > np.max(self.freq_array.to(\"Hz\")):\n raise ValueError(\n \"A requested frequency is larger than the highest subband frequency.\"\n )\n if np.min(freqs.to(\"Hz\")) < np.min(self.freq_array.to(\"Hz\")):\n raise ValueError(\n \"A requested frequency is smaller than the lowest subband frequency.\"\n )\n # Interpolate. Need to be careful if there are NaNs -- they spoil the\n # interpolation even for sources that do not have any NaNs.\n stokes_unit = self.stokes.unit\n if np.any(np.isnan(self.stokes.value)):\n allowed_nan_handling = [\"propagate\", \"interp\", \"clip\"]\n if nan_handling not in allowed_nan_handling:\n raise ValueError(\n f\"nan_handling must be one of {allowed_nan_handling}\"\n )\n\n message = \"Some stokes values are NaNs.\"\n if nan_handling == \"propagate\":\n message += (\n \" All output stokes values for sources with any NaN values \"\n \"will be NaN.\"\n )\n else:\n message += \" Interpolating using the non-NaN values only.\"\n message += (\n \" You can change the way NaNs are handled using the \"\n \"`nan_handling` keyword.\"\n )\n warnings.warn(message)\n stokes_arr = self.stokes.value\n freq_arr = self.freq_array.to(\"Hz\").value\n at_freq_arr = freqs.to(\"Hz\").value\n # first interpolate any that have no NaNs\n wh_nan = np.nonzero(np.any(np.isnan(stokes_arr), axis=(0, 1)))[0]\n wh_non_nan = np.nonzero(np.all(~np.isnan(stokes_arr), axis=(0, 1)))[0]\n assert wh_non_nan.size + wh_nan.size == self.Ncomponents, (\n \"Something went wrong with spliting sources with NaNs. 
This is a \"\n \"bug, please make an issue in our issue log\"\n )\n new_stokes = np.zeros(\n (4, freqs.size, self.Ncomponents), dtype=stokes_arr.dtype\n )\n if wh_non_nan.size > 0:\n finterp = scipy.interpolate.interp1d(\n freq_arr,\n stokes_arr[:, :, wh_non_nan],\n axis=1,\n kind=freq_interp_kind,\n )\n new_stokes[:, :, wh_non_nan] = finterp(at_freq_arr)\n\n if nan_handling == \"propagate\":\n new_stokes[:, :, wh_nan] = np.NaN\n else:\n wh_all_nan = []\n wh_nan_high = []\n wh_nan_low = []\n wh_nan_many = []\n for comp in wh_nan:\n freq_inds_use = np.nonzero(\n np.all(~np.isnan(stokes_arr[:, :, comp]), axis=0)\n )[0]\n if freq_inds_use.size == 0:\n new_stokes[:, :, comp] = np.NaN\n wh_all_nan.append(comp)\n continue\n at_freq_inds_use = np.arange(freqs.size)\n\n if np.max(at_freq_arr) > np.max(freq_arr[freq_inds_use]):\n at_freq_inds_use = np.nonzero(\n at_freq_arr <= np.max(freq_arr[freq_inds_use])\n )[0]\n at_freqs_large = np.nonzero(\n at_freq_arr > np.max(freq_arr[freq_inds_use])\n )[0]\n wh_nan_high.append(comp)\n if nan_handling == \"interp\":\n new_stokes[:, at_freqs_large, comp] = np.NaN\n else: # clip\n large_inds_use = np.full(\n (at_freqs_large.size), freq_inds_use[-1]\n )\n new_stokes[:, at_freqs_large, comp] = stokes_arr[\n :, large_inds_use, comp\n ]\n\n if np.min(at_freq_arr) < np.min(freq_arr[freq_inds_use]):\n at_freq_inds_use_low = np.nonzero(\n at_freq_arr >= np.min(freq_arr[freq_inds_use])\n )[0]\n at_freq_inds_use = np.intersect1d(\n at_freq_inds_use, at_freq_inds_use_low\n )\n at_freqs_small = np.nonzero(\n at_freq_arr < np.min(freq_arr[freq_inds_use])\n )[0]\n wh_nan_low.append(comp)\n if nan_handling == \"interp\":\n new_stokes[:, at_freqs_small, comp] = np.NaN\n else: # clip\n small_inds_use = np.full(\n (at_freqs_small.size), freq_inds_use[0]\n )\n new_stokes[:, at_freqs_small, comp] = stokes_arr[\n :, small_inds_use, comp\n ]\n\n if at_freq_inds_use.size > 0:\n try:\n finterp = scipy.interpolate.interp1d(\n freq_arr[freq_inds_use],\n stokes_arr[:, freq_inds_use, comp],\n axis=1,\n kind=freq_interp_kind,\n )\n except ValueError:\n wh_nan_many.append(comp)\n finterp = scipy.interpolate.interp1d(\n freq_arr[freq_inds_use],\n stokes_arr[:, freq_inds_use, comp],\n axis=1,\n kind=\"linear\",\n )\n new_stokes[:, at_freq_inds_use, comp] = finterp(\n at_freq_arr[at_freq_inds_use]\n )\n else:\n continue\n if len(wh_all_nan) > 0:\n warnings.warn(\n f\"{len(wh_all_nan)} components had all NaN stokes values. \"\n \"Output stokes for these components will all be NaN.\"\n )\n if len(wh_nan_high) > 0:\n message = (\n f\"{len(wh_nan_high)} components had all NaN stokes values \"\n \"above one or more of the requested frequencies. \"\n )\n if nan_handling == \"interp\":\n message += (\n \"The stokes for these components at these frequencies \"\n \"will be NaN.\"\n )\n else:\n message += (\n \"Using the stokes value at the highest frequency \"\n \"without a NaN for these components at these \"\n \"frequencies.\"\n )\n warnings.warn(message)\n if len(wh_nan_low) > 0:\n message = (\n f\"{len(wh_nan_low)} components had all NaN stokes values below \"\n \"one or more of the requested frequencies. 
\"\n )\n if nan_handling == \"interp\":\n message += (\n \"The stokes for these components at these frequencies \"\n \"will be NaN.\"\n )\n else:\n message += (\n \"Using the stokes value at the lowest frequency \"\n \"without a NaN for these components at these frequencies.\"\n )\n warnings.warn(message)\n if len(wh_nan_many) > 0:\n warnings.warn(\n f\"{len(wh_nan_many)} components had too few non-NaN stokes \"\n \"values for chosen interpolation. Using linear \"\n \"interpolation for these components instead.\"\n )\n sky.stokes = new_stokes * stokes_unit\n else:\n finterp = scipy.interpolate.interp1d(\n self.freq_array.to(\"Hz\").value,\n self.stokes.value,\n axis=1,\n kind=freq_interp_kind,\n )\n sky.stokes = finterp(freqs.to(\"Hz\").value) * stokes_unit\n else:\n # flat spectrum\n stokes_unit = self.stokes.unit\n sky.stokes = np.repeat(self.stokes.value, len(freqs), axis=1) * stokes_unit\n\n sky.reference_frequency = None\n sky.Nfreqs = freqs.size\n sky.freq_array = freqs\n if sky.spectral_type == \"subband\" and sky.freq_edge_array is not None:\n sky.freq_edge_array = None\n sky.spectral_type = \"full\"\n if sky.frame_coherency is not None:\n sky.coherency_radec = sky.calc_frame_coherency()\n\n if run_check:\n sky.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n if not inplace:\n return sky", "def _fog_by_index(\n self, index: int, val: Optional[Tuple[int, float]]\n ) -> Tuple[int, float]:\n if index < 0 or index >= 26 * 21:\n raise IndexError(\"invalid fog index\")\n\n colours = self.variables.setdefault(\"fog_trigger\", VariableArray(VariableUInt))\n if not isinstance(colours, VariableArray):\n raise ValueError(\"fog_trigger variable not an array\")\n pers = self.variables.setdefault(\"fog_per\", VariableArray(VariableFloat))\n if not isinstance(pers, VariableArray):\n raise ValueError(\"fog_per variable not an array\")\n\n while index >= len(colours):\n colours.append(0x111118)\n while index >= len(pers):\n pers.append(0.0)\n\n result = (colours[index], pers[index])\n if val is not None:\n colours[index], pers[index] = val\n return result", "def enstrophy_average(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3): # vorticity-3 component\n #---------------------------------------------------------------------#\n # Defining the domain variables #\n #---------------------------------------------------------------------#\n dim = omega1.shape\n time = dim[-1]\n avg = np.zeros(time)\n #---------------------------------------------------------------------#\n # Looping over the time variable #\n #---------------------------------------------------------------------#\n print_count = 51\n for i in range(0, time):\n term1 = np.square(omega1[:,:,:,i])\n term2 = np.square(omega2[:,:,:,i])\n term3 = np.square(omega3[:,:,:,i])\n enst = 0.5*(term1 + term2 + term3)\n avg[i] = np.mean(enst)\n #-----------------------------------------------------------------#\n # Printing statement #\n #-----------------------------------------------------------------#\n if print_count > 20:\n print('Enstrophy average ---> t_step = %i' %(i))\n print_count = 0\n print_count += 1\n\n return avg", "def getAvgTemp(self, typeSpec, blockList=None, flux2Weight=False):\n num = 0.0\n denom = 0.0\n if not blockList:\n blockList = list(self.getBlocks())\n\n for b in blockList:\n if flux2Weight:\n weight = b.p.flux**2.0\n else:\n weight = 1.0\n for c in b.iterComponents(typeSpec):\n vol = c.getVolume()\n num += c.temperatureInC * vol * weight\n denom += vol * 
weight\n\n if denom:\n return num / denom\n else:\n raise RuntimeError(\"no temperature average for {0}\".format(typeSpec))", "def anharm_freq(freqs, xmat):\n anharms = np.zeros(len(freqs))\n for i, freq in enumerate(freqs):\n anharms[i] = freq\n anharms[i] += 2. * xmat[i][i]\n tmp = 0\n for j in range(len(freqs)):\n if j != i:\n tmp += xmat[i][j]\n\n anharms[i] += 1./2 * tmp\n\n return anharms", "def element_area_and_temperature(freq_hz):\n # Element noise data.\n noise_data = {\n 'freqs': [0.05e9, 0.07e9, 0.11e9, 0.17e9, 0.25e9, 0.35e9,\n 0.45e9, 0.55e9, 0.65e9],\n 'a_eff': [1.8791, 1.8791, 1.8694, 1.3193, 0.6080, 0.2956,\n 0.2046, 0.1384, 0.0792],\n 't_sys': [4.0409e3, 1.5029e3, 0.6676e3, 0.2936e3, 0.1402e3, 0.0873e3,\n 0.0689e3, 0.0607e3, 0.0613e3]\n }\n log_freq = numpy.log10(freq_hz)\n freqs = numpy.array(noise_data['freqs'])\n a_eff = numpy.array(noise_data['a_eff'])\n t_sys = numpy.array(noise_data['t_sys'])\n f_cut = 2\n\n # Interpolate to get effective area.\n if freq_hz <= freqs[f_cut]:\n f = scipy.interpolate.interp1d(numpy.log10(freqs[:f_cut+1]), \n numpy.log10(a_eff[:f_cut+1]), kind='slinear')\n a_eff = 10**f(log_freq)\n else:\n f = scipy.interpolate.interp1d(numpy.log10(freqs[f_cut:]), \n numpy.log10(a_eff[f_cut:]), kind='cubic')\n a_eff = 10**f(log_freq)\n\n # Interpolate to get system temperature.\n f = scipy.interpolate.interp1d(numpy.log10(freqs), \n numpy.log10(t_sys), kind='cubic')\n t_sys = 10**f(log_freq)\n return a_eff, t_sys", "def average(self, times=2):\n for i in range(times):\n self.statistics()\n global t, avlist\n length = len(t)\n avlist.append(t)\n t = []\n\n total_list = []\n\n for l in range(length):\n total_list.append([])\n\n for j in range(times):\n \"\"\"per time\"\"\"\n for i in range(length):\n total_list[i].append(avlist[j][i])\n\n \"\"\"calculate\"\"\"\n ylist = []\n avlist = []\n for a in total_list:\n avg = 0\n for b in a:\n avg += b\n ylist.append(avg/times)\n self.listy = ylist\n\n for e in range(self.el[self.re[0]], self.re[1], self.re[2]):\n self.listx.append(e)", "def gaussianFilter(gain,BT,spSym,nTaps):\n\n a = np.sqrt(np.log(2)/2)/BT\n t = np.linspace(-.5*nTaps,.5*nTaps-1,nTaps)/spSym\n\n ft = np.sqrt(np.pi)/a *np.exp(-(np.pi**2*(t)**2)/a**2)\n ft /= np.sum(ft) * gain # normalize filter\n\n return ft", "def avg_temps(self):\r\n average_temp = 0\r\n for j in range(len(self.trip)):\r\n average_temp += self.trip[j].get_temperature(j)\r\n average_temp /= len(self.trip)\r\n return average_temp", "def CreateAccAvg(self):\n \n if len(self.Accel[\"X\"]) is 0:\n raise ValueError(\"Accel Category is empty\")\n self.AccelAvg = []\n\n for item in range(len(self.Accel[\"X\"])):\n #for axis in [\"X\",\"Y\",\"Z\"]:\n # if type(self.Accel[axis][item]) != type(123.345):\n # raise ValueError(\"non-number included in Accel bank. 
Use formatAllToFloat() to remove strings.\")\n self.AccelAvg.append((float(self.Accel[\"X\"][item]) + float(self.Accel[\"Y\"][item]) + float(self.Accel[\"Z\"][item])) / 3)", "def global_average(x, batch_lengths):\n\n # Loop over the clouds of the batch\n averaged_features = []\n i0 = 0\n for b_i, length in enumerate(batch_lengths):\n\n # Average features for each batch cloud\n averaged_features.append(torch.mean(x[i0:i0 + length], dim=0))\n\n # Increment for next cloud\n i0 += length\n\n # Average features in each batch\n return torch.stack(averaged_features)", "def infilRateGA(Ks, presHead, thetaSat, thetaInit, F, tp):\n numerator = Ks*np.absolute(presHead)*(thetaSat - thetaInit)\n fraction = numerator/F\n f = Ks + fraction\n\n return f", "def AvgALPflux(self, EMeV, t_sec, g11):\n t_sec_array = np.arange(0.,t_sec,0.1)\n na_dedt = self.ALPflux(EMeV, t_sec_array, g11)\n na_dedt_avg = simps(na_dedt, t_sec_array, axis = 1) / t_sec\n return na_dedt_avg", "def index_gff(gff, logger):\n f_in = open(gff, \"r\")\n gene_start_stop_dict = dict()\n gene_scaff_dict = dict()\n gene_first_exon_dict = dict()\n gene_direction = dict()\n gene_gff_line = dict()\n gene_set = set([])\n for line in f_in:\n if line.startswith(\"#\"):\n continue\n if not line.strip():\n continue\n assert len(line.split(\"\\t\")) == 9 , \"GFF fields wrong length should be 9\"\n scaff, source, feature, start, stop, score, \\\n direction, frame, gene_info = line.split(\"\\t\")\n gene = split_gene_name(gene_info)\n scaff = scaff.rstrip()\n if feature == \"gene\":\n gene_gff_line[gene] = line\n gene_set.add(gene)\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_start_stop_dict[gene] = start_stop\n gene_scaff_dict[gene] = scaff\n gene_direction[gene] = direction\n if not gene in gene_first_exon_dict.keys():\n if feature == \"exon\" or feature == \"CDS\":\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_first_exon_dict[gene] = start_stop\n f_in.close()\n logger.info(\"Number of genes = %d\", len(gene_set))\n return gene_start_stop_dict, gene_first_exon_dict, \\\n gene_scaff_dict, gene_direction, gene_set, gene_gff_line", "def readGenes(gtf):\n #read gtf\n genes = HTSeq.GenomicArrayOfSets(\"auto\", stranded=False)\n gs = {}\n for line in open(gtf):\n if line.startswith(\"#\"):\n continue\n line = line.split(\"\\n\")[0].split(\"\\t\")\n if line[2] != 'exon':\n continue\n ds = parseGtfFeature(line[8])\n key = \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]])\n nline = [\n line[0], line[3], line[4],\n \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]]), \".\", line[6]\n ]\n if key not in gs:\n gs[key] = [line[0], int(line[3]), int(line[4])]\n else:\n if int(line[3]) < gs[key][1]:\n gs[key][1] = int(line[3])\n if int(line[4]) > gs[key][2]:\n gs[key][2] = int(line[4])\n for g, v in gs.items():\n iv = HTSeq.GenomicInterval(v[0], v[1], v[2])\n genes[iv] += g\n return genes", "def brightness_temperature(frequency, beam_area=None):\n if frequency.unit.is_equivalent(si.sr):\n if not beam_area.unit.is_equivalent(si.Hz):\n raise ValueError(\n \"The inputs to `brightness_temperature` are frequency and angular area.\"\n )\n warnings.warn(\n \"The inputs to `brightness_temperature` have changed. 
\"\n \"Frequency is now the first input, and angular area \"\n \"is the second, optional input.\",\n AstropyDeprecationWarning,\n )\n frequency, beam_area = beam_area, frequency\n\n nu = frequency.to(si.GHz, spectral())\n factor_Jy = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value\n factor_K = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value\n\n if beam_area is not None:\n beam = beam_area.to_value(si.sr)\n\n def convert_Jy_to_K(x_jybm):\n return x_jybm / beam / factor_Jy\n\n def convert_K_to_Jy(x_K):\n return x_K * beam / factor_K\n\n return Equivalency(\n [\n (astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy),\n (astrophys.Jy / astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy),\n ],\n \"brightness_temperature\",\n {\"frequency\": frequency, \"beam_area\": beam_area},\n )\n else:\n\n def convert_JySr_to_K(x_jysr):\n return x_jysr / factor_Jy\n\n def convert_K_to_JySr(x_K):\n return x_K / factor_K # multiplied by 1x for 1 steradian\n\n return Equivalency(\n [(astrophys.Jy / si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)],\n \"brightness_temperature\",\n {\"frequency\": frequency, \"beam_area\": beam_area},\n )", "def makespectfile(afile):\n x = []\n y = []\n with open(afile) as f:\n for line in f:\n if line.startswith('#'): continue\n (freq,flux) = line.split()\n x.append(float(freq))\n y.append(float(flux))\n return (np.asarray(x),np.asarray(y))", "def compute_single_ph_gfunc(\n obj: floquet_analysis.FloquetAnalyzer,\n freqs: np.array) -> np.ndarray:\n # Compute the decay operator and the convolved excitation operator within\n # the single-excitation subspace.\n decay_op = obj.decay_op(1)\n ex_op_conv = obj.ex_op_conv(1, freqs)\n gfunc_time = np.matmul(\n decay_op[np.newaxis, :, :, :], ex_op_conv)[:, :, 0, 0]\n return gfunc_time, np.fft.fftshift(np.fft.ifft(gfunc_time, axis=1), axes=1)", "def spectralSmoothing(spectrum, f_upper, f_lower):\n smoothed_array = np.zeros(len(f_upper))\n for i in range(len(smoothed_array)):\n # if statement is required since clever indexing isn't that clever.\n if f_upper[i] == f_lower[i]:\n smoothed_array[i] = spectrum[f_lower[i]]\n else:\n smooth_values = spectrum[f_lower[i]:(f_upper[i] + 1)]\n smoothed_array[i] = np.mean(smooth_values)\n return smoothed_array", "def _foi_average(conn, foi_idx):\n # get the number of foi\n n_foi = foi_idx.shape[0]\n\n # get input shape and replace n_freqs with the number of foi\n sh = list(conn.shape)\n sh[-2] = n_foi\n\n # compute average\n conn_f = np.zeros(sh, dtype=conn.dtype)\n for n_f, (f_s, f_e) in enumerate(foi_idx):\n conn_f[..., n_f, :] = conn[..., f_s:f_e, :].mean(-2)\n return conn_f", "def analyze1(ys, freqs, ts):\n args = numpy.outer(ts, freqs)\n M = numpy.exp(i * PI2 * args)\n amps = numpy.linalg.solve(M, ys)\n return amps", "def gtgram(wave,fs,window_time, hop_time,channels,f_min,f_max):\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gt.gtgram_strides(fs,window_time, hop_time, xe.shape[1])\n y = np.zeros((channels, ncols))\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n return y", "def average_grad(self):\n\n # Compute the respective gradients\n grad_line_1 = self.gradient(0,1)\n grad_line_2 = self.gradient(2,3)\n\n a1 = np.abs(np.arctan(grad_line_1))\n a2 = np.abs(np.arctan(grad_line_2))\n\n ave_grad = np.tan((a1+a2)/2)\n\n #ave_grad = np.average([grad_line_1,grad_line_2]) # Compute the average gradient\n\n return ave_grad", "def get_spectrum_freq(self):\n 
if not self.is_a_spectrum_file():\n raise TelemacException(\\\n \"This file does not seem to be a spectrum file\")\n\n nfreq = 0\n eps = 1e-6\n f_1 = 10e10\n f_2 = 10e10\n raisf = 0.\n for x, y in zip(self.meshx, self.meshy):\n if abs(x) <= eps and y >= 0.:\n nfreq += 1\n f_temp = y\n if f_temp < f_1:\n f_2 = f_1\n f_1 = f_temp\n elif f_temp < f_2:\n f_2 = f_temp\n\n raisf = f_2/f_1\n\n freqs = [f_1 * raisf**i for i in range(nfreq)]\n\n dfreqs = np.zeros(nfreq, dtype=np.float64)\n\n auxi = (raisf - 1.)/2.\n dfreqs[0] = auxi*freqs[0]\n for i in range(1, nfreq-1):\n dfreqs[i] = auxi*(freqs[i] + freqs[i-1])\n\n dfreqs[-1] = auxi*freqs[-2]\n\n return np.array(freqs), dfreqs", "def chroma_stft(frames, sample_rate, *, kwargs={}):\n l = []\n for frame in frames:\n l.append(\n np.mean(\n librosa.feature.chroma_stft(\n y=frame,\n sr=sample_rate,\n **kwargs\n ).T, axis=0\n )\n )\n return np.array(l)", "def _filter_frequencies(self):\n import scipy.signal as spsg\n freq_bands = ['alpha', 'beta', 'gamma']\n if len(freq_bands) != self.n_bands:\n raise ValueError('Rename frequency bands')\n freqs_ts = np.empty([0, self.total_trials, self.ms, self.n_raw_features])\n for i_band in range(self.n_bands):\n freq_band = freq_bands[i_band]\n\n if freq_band == 'alpha':\n low_f = 8./self.sampling_freq\n high_f = 15./self.sampling_freq\n elif freq_band == 'beta':\n # beta\n low_f = 15./self.sampling_freq\n high_f = 32./self.sampling_freq\n elif freq_band == 'gamma':\n # gamma\n low_f = 32./self.sampling_freq\n high_f = 80./self.sampling_freq\n else:\n raise NameError('unknown filter')\n\n b, a = spsg.iirfilter(self.band_filter_order, [low_f, high_f],\n btype='bandpass', ftype='butter', output='ba')\n # ts_data: (trials, t, n)\n filtered_ts = spsg.filtfilt(b, a, self.ts_data, axis=-2)\n freqs_ts = np.concatenate((freqs_ts, np.array([filtered_ts])))\n\n return freqs_ts", "def average_gradients(grad_list):\n average_grads = []\n for grad_and_vars in zip(*grad_list):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads", "def average_grads(all_grads):\n\n nr_tower = len(all_grads)\n if nr_tower == 1:\n return all_grads[0]\n\n new_all_grads = [] # NVar * NGPU * 2\n with tf.name_scope('AvgGrad'):\n for grad_and_vars in zip(*all_grads):\n # Ngpu * 2\n grads = [g for (g, _) in grad_and_vars]\n summed = tf.multiply(tf.add_n(grads), 1.0 / nr_tower)\n\n grads_for_a_var = []\n for (_, v), g in zip(grad_and_vars, [summed]*nr_tower):\n grads_for_a_var.append((g, v))\n new_all_grads.append(grads_for_a_var)\n\n ret = [list(k) for k in zip(*new_all_grads)]\n return ret", "def average_gradients(self, tower_grads):\n average_grads = []\n\n # get variable and gradients in differents gpus\n for grad_and_vars in zip(*tower_grads):\n # calculate the average gradient of each gpu\n grads = []\n for g, _ in grad_and_vars:\n expanded_g = tf.expand_dims(g, 0)\n grads.append(expanded_g)\n grad = tf.concat(grads, 0)\n grad = tf.reduce_mean(grad, 0)\n\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads", "def zenith_gain(freq):\n parfile = open(project_path\n + \"DSS-28_technical/efficiency_vs_freq_pars.pkl\",\"r\")\n pars = cPickle.load(parfile)\n parfile.close()\n effic = {}\n avg_effic = 0\n for key in list(pars.keys()):\n effic[key] = pars[key](freq)/100.\n avg_effic += effic[key]\n # right now I don't know what Pol A and Pol B are\n avg_effic /= len(list(pars.keys()))\n return avg_effic", "def haperfluxMany(inputlist, maplist, radius, rinner, router, galactic=True, decimal=True, noise_model=0):\n\n ## Names and frequencies of the sample maps included in this repo.\n\n freqlist = ['30','44','70','100','143','217','353','545','857','1249','1874','2141','2998','3331','4612','4997','11992','16655','24983','24983','24983','33310']\n freqval = [28.405889, 44.072241,70.421396,100.,143.,217.,353.,545.,857.,1249.,1874.,2141.,2141.,2998.,2998.,3331.,4612.,4997.,11992.,16655.,24983.,24983.,24983.,33310.]\n band_names = [\"akari9\", \"dirbe12\",\"iras12\",\"wise12\",\"akari18\",\"iras25\",\"iras60\",\"akari65\",\"akari90\",\"dirbe100\",\"iras100\",\"akari140\",\"dirbe140\",\"akari160\",\"dirbe240\",\"planck857\", \"planck545\"]\n\n k0 = 1.0\n k1 = rinner/radius\n k2 = router/radius\n apcor = ((1 - (0.5)**(4*k0**2))-((0.5)**(4*k1**2) - (0.5)**(4*k2**2)))**(-1)\n\n # 'galactic' overrules 'decimal'\n if (galactic==True):\n dt=[('sname',np.dtype('S13')),('glon',np.float32),('glat',np.float32)]\n targets = np.genfromtxt(inputlist, delimiter=\",\",dtype=dt)\n\n ns = len(targets['glat'])\n\n fd3 = -1\n fd_err3 = -1\n\n fn = np.genfromtxt(maplist, delimiter=\" \", dtype='str')\n nmaps = len(fn)\n ## Initialize the arrays which will hold the results\n fd_all = np.zeros((ns,nmaps))\n fd_err_all = np.zeros((ns,nmaps))\n fd_bg_all = np.zeros((ns,nmaps))\n\n # Start the actual processing: Read-in the maps.\n for ct2 in range(0,nmaps):\n xtmp_data, xtmp_head = hp.read_map(fn[ct2], h=True, verbose=False, nest=False)\n freq = dict(xtmp_head)['FREQ']\n units = dict(xtmp_head)['TUNIT1']\n freq_str = str(freq)\n idx = freqlist.index(str(freq))\n currfreq = int(freq)\n\n if (radius == None):\n radval = fwhmlist[idx]\n else:\n radval = radius\n\n\n for ct in range(0,ns):\n\n glon = targets['glon'][ct]\n glat = targets['glat'][ct]\n\n fd_all[ct,ct2], fd_err_all[ct,ct2], fd_bg_all[ct,ct2] = \\\n haperflux(inmap= xtmp_data, 
freq= currfreq, lon=glon, lat=glat, aper_inner_radius=radius, aper_outer_radius1=rinner, \\\n aper_outer_radius2=router,units=units, noise_model=noise_model)\n\n if (np.isfinite(fd_err_all[ct,ct2]) == False):\n fd_all[ct,ct2] = -1\n fd_err_all[ct,ct2] = -1\n else:\n if radius==None:\n fd_all[ct,ct2] = fd_all[ct,ct2]*apcor\n fd_err_all[ct,ct2] = fd_err_all[ct,ct2]*apcor\n\n return fd_all, fd_err_all, fd_bg_all", "def flux_sensitivity(freq_hz, t_acc=5, bw_hz=100e3, num_antennas=256, eta=1,\n t_sys=None, a_eff=None):\n t_sys = system_temp(freq_hz) if t_sys is None else t_sys\n a_eff = (element_effective_area(freq_hz) * num_antennas) if a_eff is None \\\n else a_eff\n sefd = (2 * const.k_B.value * t_sys * eta) / a_eff\n sigma_s = (sefd * 1e26) / sqrt(2 * bw_hz * t_acc)\n return sigma_s", "def test_aft_equals1(self, test_peak_idx):\n test_data = self.get_test_peaks(self.n_top)\n test_data[test_peak_idx]['area_per_channel'][:self.n_top] = 1\n test_data[test_peak_idx]['area'] = np.sum(test_data[test_peak_idx]['area_per_channel'])\n peaks = self.peaks_basics_compute(test_data)\n assert peaks[test_peak_idx]['area_fraction_top'] == 1", "def extract_boft(self, min_freq=0.06, max_freq=0.66, bank=8, *args, **kwargs):\n # First generate the wavelets\n target_hz = self.sampling_freq\n freqs = np.geomspace(min_freq, max_freq, bank)\n wavs, hzs = [], []\n for i, f in enumerate(freqs):\n wav = np.real(wavelet(f, sampling_freq=target_hz))\n wavs.append(wav)\n hzs.append(str(np.round(freqs[i], 2)))\n wavs = np.array(wavs)[::-1]\n hzs = np.array(hzs)[::-1]\n # # check asymptotes at lowest freq\n # asym = wavs[-1,:10].sum()\n # if asym > .001:\n # print(\"Lowest frequency asymptotes at %2.8f \" %(wavs[-1,:10].sum()))\n\n # Convolve data with wavelets\n Feats2Use = self.columns\n feats = pd.DataFrame()\n for feat in Feats2Use:\n _d = self[[feat]].T\n assert _d.isnull().sum().any() == 0, \"Data contains NaNs. Cannot convolve. 
\"\n for iw, cm in enumerate(wavs):\n convolved = np.apply_along_axis(\n lambda m: np.convolve(m, cm, mode=\"full\"), axis=1, arr=_d.values\n )\n # Extract bin features.\n out = pd.DataFrame(convolved.T).apply(calc_hist_auc, args=(None))\n # 6 bins hardcoded from calc_hist_auc\n colnames = [\n \"pos\" + str(i) + \"_hz_\" + hzs[iw] + \"_\" + feat for i in range(6)\n ]\n colnames.extend(\n [\"neg\" + str(i) + \"_hz_\" + hzs[iw] + \"_\" + feat for i in range(6)]\n )\n out = out.T\n out.columns = colnames\n feats = pd.concat([feats, out], axis=1)\n return self.__class__(\n feats, sampling_freq=self.sampling_freq, features=self.features\n )", "def average_grads(self, grads):\n # quick path: only one device, just return the grads\n if len(grads) == 1:\n return grads[0]\n\n # slow path: multi-GPUs\n else:\n with tf.device(self.main_device), tf.name_scope('average_grads'):\n return average_gradients(grads)", "def load_average(self):\n return _favg(self.load_samples)", "def avg_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n\n try:\n raise_if_not_multiple_spikes(intervals)\n except NoMultipleSpikesException:\n return None\n\n avg_int = np.average(intervals)\n return 1/avg_int", "def daily_temp(dft):\n\n tavg = (dft[\"T_Max\"] + dft[\"T_Min\"]) / 20 # tenths of degree C\n return tavg", "def averageTrialsByTriggers(trigger_indices, np_data):\n trialLen = trigger_indices[1] -trigger_indices[0] -1\n data_avg = [] \n data_std = [] \n\n for i in trigger_indices:\n data_avg.append(numpy.average(np_data[i+1:i+trialLen-1])) \n data_std.append(numpy.std(np_data[i+1:i+trialLen-1])) \n \n return (data_avg, data_std)", "def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl", "def gpower_integrand(self,theta,phi):\n\n wavelength = 299.9/self.frequency\n if(phi == 0): phi = .00001\n if(phi == np.pi): phi = np.pi - .00001\n\n self.aa.alt = np.pi/2.0 - theta\n self.aa.az = np.pi/2.0 - phi\n\n coords = self.aa.raDec(self.__lst_current,self.location)\n\n coords = self.Rotator(np.pi/2 - coords.dec,coords.ra)\n\n Tsky = hp.get_interp_val(self.hpmap,coords[0],coords[1])*(self.frequency/408.0)**(-2.55)\n\n ans = self.lofasm.beam_pattern(theta,phi,[0,0,1])\n ans += self.lofasm.beam_pattern(theta,phi,[0,1,0]) \n ans *= (Tsky*(1.3804e-23)/wavelength**2)/(1e-26)/2.0\n\n return ans", "def get_average_torsion (phis) :\n shift = phis[0]\n phis_shifted = get_diffvec(phis,shift)\n avg_shifted = phis_shifted.sum()/len(phis)\n average = avg_shifted + shift\n return average", "def get_average_torsion (phis) :\n shift = phis[0]\n phis_shifted = 
get_diffvec(phis,shift)\n avg_shifted = phis_shifted.sum()/len(phis)\n average = avg_shifted + shift\n return average", "def stochastic_average(h, a, alpha, numsteps, numiter):\n #Same times for all trajectories\n times = zeros(numsteps+1)\n trajectories = zeros((numiter, numsteps+1, 2))\n\n #Initial phase\n phase = random.uniform(0, 1)\n for i in range(numiter):\n times, trajectories[i] = EulerInteg(h, a, alpha, numsteps, phase)\n\n stochastic_av = zeros((numsteps+1, 2))\n for j in range(numsteps+1):\n average_pos = array([0., 0.])\n for k in range(numiter):\n average_pos = average_pos + trajectories[k][j]\n stochastic_av[j] = average_pos/numiter\n\n return times, stochastic_av", "def A_TT_fg(self, L, fCfg):\n if L>2.*self.CMB.lMaxT:\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>self.CMB.lMaxT:\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.f_TT_fg(l1, l2, phi, fCfg) * self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.A_TT.__func__, \"integ\"):\n self.A_TT.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(self.CMB.lMaxT)], [0., np.pi]])\n self.A_TT.integ(integrand, nitn=8, neval=1000)\n\n result = self.A_TT.integ(integrand, nitn=1, neval=5000)\n# result = self.A_TT.integ(integrand, nitn=8, neval=5000)\n# result = self.A_TT.integ(integrand, nitn=4, neval=1000)\n\n result = result.mean\n\n # multiply by N^{0 phi}, to get dimensionless multiplicative bias\n result *= self.fN_phi_TT(L)\n \n if not np.isfinite(result):\n result = 0.\n return result", "def sfreq_to_times(gaze_array, sfreq, start_time=0):\n return np.arange(0, len(gaze_array) / sfreq, 1. 
/ sfreq) + start_time", "def average_form_factors(qz_lists, F_lists): \n if len(qz_lists) < 2:\n raise TypeError('Need more than one form factor set for averaging')\n if len(qz_lists) != len(F_lists):\n raise TypeError('Number of qz and F data sets must agree')\n for qzvalues, Fvalues in zip(qz_lists, F_lists):\n if len(qzvalues) != len(Fvalues):\n raise TypeError('Length of each qz and F data set must agree') \n \n qz_bin, F_bin = create_binned_data(qz_lists, F_lists)\n normalize_to_each_other(F_bin)\n qz_bin = np.array(qz_bin)\n F_bin = np.array(F_bin)\n avg_qz = np.mean(qz_bin, axis=1)\n err_qz = np.std(qz_bin, axis=1, ddof=1, dtype=np.float64)\n avg_F = np.mean(F_bin, axis=1) \n err_F = np.std(F_bin, axis=1, ddof=1, dtype=np.float64) \n \n return avg_qz, err_qz, avg_F, err_F", "def generate_features(self):\n\n # For each STFT timebin, divide data into three bins and get mean power\n data_array = np.array([])\n bl_array = np.array([])\n\n for trial in range(self.data_stft_norm.shape[-1]): # Each trial\n for tbin in range(self.data_stft_norm.shape[-2]): # Each timebin\n for ch in range(self.data_stft_norm.shape[0]):\n data_array = np.append(data_array,[\n np.mean(self.data_stft_norm[ch, :2, tbin, trial]),\n np.mean(self.data_stft_norm[ch, 3:8, tbin, trial]),\n np.mean(self.data_stft_norm[ch, 9:27, tbin, trial])])\n\n data_array = np.reshape(data_array, (-1, 18))\n\n for trial in range(self.bl_stft_norm.shape[-1]): # Each trial\n for tbin in range(self.bl_stft_norm.shape[-2]): # Each timebin\n for ch in range(self.bl_stft_norm.shape[0]):\n bl_array = np.append(bl_array, [\n np.mean(self.bl_stft_norm[ch, :2, tbin, trial]),\n np.mean(self.bl_stft_norm[ch, 3:8, tbin, trial]),\n np.mean(self.bl_stft_norm[ch, 9:27, tbin, trial])])\n bl_array = np.reshape(bl_array, (-1, 18))\n\n X = np.append(data_array, bl_array, axis=0)\n y = np.append(np.ones(data_array.shape[0]), np.zeros(bl_array.shape[0]))\n\n return X, y", "def fmean(configuration):\n fmean_dict_all = {\n \"HL\" : {'H1' : 100., 'L1' : 100.},\n \"HLV\" : {'H1' : 100., 'L1' : 100., 'V1': 130.},\n \"HLVK\" : {'H1' : 100., 'L1' : 100., 'V1': 130., 'K1' : 130.},\n \"HLVKI\" : {'H1' : 100., 'L1' : 100., 'V1': 130., 'K1' : 130., 'I1' : 100.},\n \"GW170817\" : {'H1' : 100., 'L1' : 100., 'V1': 130.},\n \"GW170814\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9},\n \"GW170817_without_Virgo\" : {'H1' : 100., 'L1' : 100.},\n \"steve\" : {'H1' : 100.0, 'L1' : 100.0, 'V1': 100.0, \"I1\" : 100.0 },\n \"design\" : {'H1' : 100., 'L1' : 100., 'V1': 130. },\n \"india\" : {'H1' : 100., 'I1' : 100., 'L1' : 100., 'V1': 130. },\n \"s6vsr2\" : {'H1' : 180., 'L1' : 180., 'V1': 150. 
}\n }\n return(fmean_dict_all[configuration])", "def extract_heartrate_tsfresh(transformed: np.ndarray) -> np.ndarray:\n ecg_features = []\n print(\"Extracting TSFRESH statistics from heart rate signals...\")\n\n for x in tqdm(transformed):\n vchange_quantiles_abs = change_quantiles(x[:, -1], 0, 0.8, True, \"var\")\n vfft_aggregated_k = list(fft_aggregated(x[:, -1], [{\"aggtype\": \"kurtosis\"}]))[0][1]\n vmean_abs_change = mean_abs_change(x[:, -1])\n vabsolute_sum_of_changes = absolute_sum_of_changes(x[:, -1])\n vfft_aggregated_s = list(fft_aggregated(x[:, -1], [{\"aggtype\": \"skew\"}]))[0][1]\n vfft_aggregated_c = list(fft_aggregated(x[:, -1], [{\"aggtype\": \"centroid\"}]))[0][1]\n vvariance = variance(x[:, -1])\n vvariation_coefficient = variation_coefficient(x[:, -1])\n\n new_tsfresh = np.array(\n [\n vchange_quantiles_abs,\n vfft_aggregated_k,\n vmean_abs_change,\n vabsolute_sum_of_changes,\n vfft_aggregated_s,\n vfft_aggregated_c,\n vvariance,\n vvariation_coefficient,\n ]\n )\n\n ecg_features.append(np.concatenate(new_tsfresh, axis=0))\n\n return np.array(ecg_features)", "def _coherency_bavg(fxy, fxx, fyy):\r\n\r\n # Average the phases and the magnitudes separately and then recombine:\r\n\r\n p = np.angle(fxy)\r\n p_bavg = np.mean(p)\r\n\r\n m = np.abs(coherency_spec(fxy, fxx, fyy))\r\n m_bavg = np.mean(m)\r\n\r\n # Recombine according to z = r(cos(phi)+sin(phi)i):\r\n return m_bavg * (np.cos(p_bavg) + np.sin(p_bavg) * 1j)", "def get_farm_AEP(\n self,\n freq,\n cut_in_wind_speed=0.001,\n cut_out_wind_speed=None,\n yaw_angles=None,\n turbine_weights=None,\n no_wake=False,\n ) -> float:\n\n # Verify dimensions of the variable \"freq\"\n if not (\n (np.shape(freq)[0] == self.floris.flow_field.n_wind_directions)\n & (np.shape(freq)[1] == self.floris.flow_field.n_wind_speeds)\n & (len(np.shape(freq)) == 2)\n ):\n raise UserWarning(\n \"'freq' should be a two-dimensional array with dimensions \"\n \" (n_wind_directions, n_wind_speeds).\"\n )\n\n # Check if frequency vector sums to 1.0. 
If not, raise a warning\n if np.abs(np.sum(freq) - 1.0) > 0.001:\n self.logger.warning(\n \"WARNING: The frequency array provided to get_farm_AEP() \"\n \"does not sum to 1.0.\"\n )\n\n # Copy the full wind speed array from the floris object and initialize\n # the the farm_power variable as an empty array.\n wind_speeds = np.array(self.floris.flow_field.wind_speeds, copy=True)\n farm_power = np.zeros((self.floris.flow_field.n_wind_directions, len(wind_speeds)))\n\n # Determine which wind speeds we must evaluate in floris\n conditions_to_evaluate = wind_speeds >= cut_in_wind_speed\n if cut_out_wind_speed is not None:\n conditions_to_evaluate = conditions_to_evaluate & (wind_speeds < cut_out_wind_speed)\n\n # Evaluate the conditions in floris\n if np.any(conditions_to_evaluate):\n wind_speeds_subset = wind_speeds[conditions_to_evaluate]\n yaw_angles_subset = None\n if yaw_angles is not None:\n yaw_angles_subset = yaw_angles[:, conditions_to_evaluate]\n self.reinitialize(wind_speeds=wind_speeds_subset)\n if no_wake:\n self.calculate_no_wake(yaw_angles=yaw_angles_subset)\n else:\n self.calculate_wake(yaw_angles=yaw_angles_subset)\n farm_power[:, conditions_to_evaluate] = (\n self.get_farm_power(turbine_weights=turbine_weights)\n )\n\n # Finally, calculate AEP in GWh\n aep = np.sum(np.multiply(freq, farm_power) * 365 * 24)\n\n # Reset the FLORIS object to the full wind speed array\n self.reinitialize(wind_speeds=wind_speeds)\n\n return aep", "def ALPflux(self, EMeV, t_sec, g11):\n na_dedt = self._alp(EMeV=EMeV, ts = t_sec, g10 = g11 * 0.1) # alp spectrum per energy and time\n return na_dedt * 1.e52", "def FeatureExtraction(ppg, accx, accy, accz):\n\n fs = 125\n n = len(ppg) * 4\n # applying fast Fourier transform\n freqs = np.fft.rfftfreq(n, 1/fs)\n fft = np.abs(np.fft.rfft(ppg,n))\n fft[freqs <= 40/60.0] = 0.0\n fft[freqs >= 240/60.0] = 0.0\n \n ## calculating L2 norm\n acc_mag = np.sqrt(accx**2 + accy**2 + accz**2)\n acc_fft = np.abs(np.fft.rfft(acc_mag, n))\n acc_fft[freqs <= 40/60.0] = 0.0\n acc_fft[freqs >= 240/60.0] = 0.0\n \n ppg_feature = freqs[np.argmax(fft)]\n acc_feature = freqs[np.argmax(acc_fft)]\n \n return (np.array([ppg_feature, acc_feature]), ppg, accx, accy, accz)", "def get_brightest_mean(self, num_pix=3):\n peak_x = np.zeros(\n [len(self.pixel_x)]) # Create blank arrays for peaks\n # rather than a dict (faster)\n peak_y = np.zeros(peak_x.shape)\n peak_amp = np.zeros(peak_x.shape)\n\n # Loop over all tels to take weighted average of pixel\n # positions This loop could maybe be replaced by an array\n # operation by a numpy wizard\n\n tel_num = 0\n for tel in self.image:\n top_index = self.image[tel].argsort()[-1 * num_pix:][::-1]\n print(top_index, self.pixel_x[tel][top_index],\n self.image[tel][top_index])\n weight = self.image[tel][top_index]\n weighted_x = self.pixel_x[tel][top_index] * weight\n weighted_y = self.pixel_y[tel][top_index] * weight\n\n ppx = np.sum(weighted_x) / np.sum(weight)\n ppy = np.sum(weighted_y) / np.sum(weight)\n\n peak_x[tel_num] = ppx # Fill up array\n peak_y[tel_num] = ppy\n peak_amp[tel_num] = np.sum(weight)\n tel_num += 1\n\n self.peak_x = peak_x # * unit # Add to class member\n self.peak_y = peak_y # * unit\n self.peak_amp = peak_amp", "def calculate_gains(self,data, map_fits, avg_map_fits):\n nFeeds,nBands,nChans,nParams = map_fits['Values'].shape\n frequencies = data[f'{self.level2}/averaged_frequency'][...]\n kb = 1.38064852e-23\n c = 2.99792458e8\n scale = 2 * kb * (1e9/ c)**2 * 1e26\n\n source = self.getSource(data)\n self.flux = 
np.zeros((len(self.feeds),nBands, nChans))\n self.gain = np.zeros((len(self.feeds),nBands, nChans))\n\n for i,(ifeed,feed) in enumerate(zip(self.feedlist,self.feeds)):\n for iband in range(nBands):\n nu = frequencies[iband]\n sigx = avg_map_fits[iband]['Values'][2] \n sigy = avg_map_fits[iband]['Values'][2]*avg_map_fits[iband]['Values'][4]\n amps = map_fits['Values'][i,iband,:,0]\n self.flux[i,iband,:] = 2*np.pi*amps*sigx*sigy*(np.pi/180.)**2 * scale*nu**2\n mdl_flux = self.models[source](nu,map_fits['MJD'],return_jansky=True,allpos=True)\n self.gain[i,iband,:] = self.flux[i,iband,:]/mdl_flux\n\n return self.flux, self.gain", "def return_average(rgb):\n value = sum(rgb[:3])//3 \n return (value, value, value)", "def sharpe(returns, freq=30, rfr=0):\n return (np.sqrt(freq) * np.mean(returns - rfr + eps)) / np.std(returns - rfr + eps)", "def stft(db,istart=0,istop=86400,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**9):\r\n \r\n #get length of input time series if there is two columns\r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm<fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=fx.shape[0]\r\n fm=1\r\n if fm>1:\r\n fx=fx.reshape(fn)\r\n else:\r\n fx=fx.reshape(fn)\r\n #make a hanning window to minimize aliazing and Gibbs effect of short time \r\n #windows\r\n h=normalizeL2(np.hanning(nh))\r\n #make a hanning window to smooth in frequency domain\r\n if ng!=1:\r\n if np.remainder(ng,2)!=1:\r\n ng=ng-1\r\n print 'ng forced to be odd as ng-1'\r\n else:\r\n pass\r\n g=normalizeL2(np.hanning(ng))\r\n else:\r\n pass\r\n #make time step list\r\n tlst=np.arange(start=0,stop=fn-nh+1,step=tstep)\r\n #make a frequency list for plotting exporting only positive frequencies\r\n df=float(df)\r\n flst=np.fft.fftfreq(nfbins,1/df)[0:nfbins/2] #get only positive frequencies\r\n #initialize the TFD array\r\n tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')\r\n \r\n fa=sps.hilbert(dctrend(fx))\r\n \r\n for place,ii in enumerate(tlst):\r\n fxwin=fa[ii:ii+nh]*h\r\n #get only positive frequencies\r\n FXwin=np.fft.fft(padzeros(fxwin,npad=nfbins))[:nfbins/2]\r\n #smooth in frequency plane\r\n if ng!=1:\r\n FXwin=np.convolve(padzeros(FXwin,npad=len(FXwin)+ng-1),g,'valid')\r\n else:\r\n pass\r\n #pull out only positive quadrant, flip array for plotting\r\n tfarray[:,place]=FXwin[::-1]\r\n \r\n return tfarray,tlst,flst", "def find_average(self):\n df = self.find_top_seven_routes()\n # Find the total of the frequency of the top 7 traveled routes\n total =df.sort_values('Frequency', ascending=False).Frequency[:7].sum()\n # Calculate the average by dividing each frequency by the total\n df['average'] = df['Frequency'] / total\n\n return df", "def get_avg_f1(ccs):\n scorers = ccs.scorers\n f1_scores = []\n for scorer in scorers:\n f1_scores.append(scorer.get_f1())\n\n avg_f1 = 100 * np.average(f1_scores)\n\n return avg_f1", "def allele_freqs(self):\n diploid = self.geno.sum(2) * 0.5\n return np.nanmean(diploid, axis = 0)", "def fringe_frequency(self, wavelength=0.028, terrestrial_latitude=37.873199, h_s0=0):\n\t\tBew, Bns, baseline = bf.baseline_script_2D(self.hour_angles, 0, self.volts, self.times)\n\t\tfirst_term = Bew / wavelength * np.cos(self.dec) * cos(h_s0)\n\t\tsecond_term = Bns / wavelength * np.sin(terrestrial_latitude) * np.cos(self.dec) * np.sin(h_s0)\n\t\treturn first_term - second_term", "def compute_tf(doc_info, freq_dict_all):\n tf_scores = []\n\n for temp_dict in freq_dict_all:\n id = temp_dict['doc_id']\n\n for k in temp_dict['freq_dict']:\n temp = {\n 'doc_id': 
id,\n 'TF_Score': temp_dict['freq_dict'][k] / doc_info[id - 1]['doc_length'],\n 'key': k\n }\n\n tf_scores.append(temp)\n\n return tf_scores", "def ram_average(self):\n return _favg(self.ram_samples)", "def merge_hpx_counts_cubes(filelist):\n out_prim = None\n out_skymap = None\n out_ebounds = None\n\n datalist_gti = []\n exposure_sum = 0.\n nfiles = len(filelist)\n ngti = np.zeros(nfiles, int)\n\n out_name = None\n\n for i, filename in enumerate(filelist):\n fin = fits.open(filename)\n sys.stdout.write('.')\n sys.stdout.flush()\n if i == 0:\n out_prim = update_null_primary(fin[0], out_prim)\n out_name = fin[1].name\n\n map_in = HpxMap.create_from_hdulist(fin)\n out_skymap = update_hpx_skymap_allsky(map_in, out_skymap)\n if i == 0:\n try:\n out_ebounds = update_ebounds(fin[\"EBOUNDS\"], out_ebounds)\n except KeyError:\n out_ebounds = update_energies(fin[\"ENERGIES\"], out_ebounds)\n try:\n (gti_data, exposure, tstop) = extract_gti_data(fin[\"GTI\"])\n datalist_gti.append(gti_data)\n exposure_sum += exposure\n ngti[i] = len(gti_data)\n except KeyError:\n pass\n\n if i == 0:\n first = fin\n elif i == nfiles - 1:\n try:\n date_end = fin[0].header['DATE-END']\n except KeyError:\n date_end = None\n else:\n fin.close()\n\n out_skymap_hdu = out_skymap.create_image_hdu(\"SKYMAP\")\n\n hdulist = [out_prim, out_skymap_hdu, out_ebounds]\n\n if len(datalist_gti) > 0:\n out_gti = merge_all_gti_data(datalist_gti, ngti, first['GTI'])\n out_gti.header['EXPOSURE'] = exposure_sum\n out_gti.header['TSTOP'] = tstop\n hdulist.append(out_gti)\n\n for hdu in hdulist:\n if date_end:\n hdu.header['DATE-END'] = date_end\n\n out_prim.update_header()\n sys.stdout.write(\"!\\n\")\n\n return fits.HDUList(hdulist)", "def GetMean(trrecord, samplelists=[], uselength=True):\n if len(samplelists) == 0: samplelists.append(None)\n return [utils.GetMean(trrecord.GetAlleleFreqs(samplelist=sl, uselength=True)) for sl in samplelists]", "def average_gradients(tower_grads):\r\n average_grads = []\r\n for grad_and_vars in zip(*tower_grads):\r\n # Note that each grad_and_vars looks like the following:\r\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\r\n grads = []\r\n for g, _ in grad_and_vars:\r\n # Add 0 dimension to the gradients to represent the tower.\r\n expanded_g = tf.expand_dims(g, 0)\r\n\r\n # Append on a 'tower' dimension which we will average over below.\r\n grads.append(expanded_g)\r\n\r\n # Average over the 'tower' dimension.\r\n grad = tf.concat(grads, 0)\r\n grad = tf.reduce_mean(grad, 0)\r\n\r\n # Keep in mind that the Variables are redundant because they are shared\r\n # across towers. So .. 
we will just return the first tower's pointer to\r\n # the Variable.\r\n v = grad_and_vars[0][1]\r\n grad_and_var = (grad, v)\r\n average_grads.append(grad_and_var)\r\n return average_grads", "def get_peak_frequency(ut_average: np.ndarray, \n omega: np.ndarray) -> Tuple[float, float, float]:\n\n # We get the indices of the peak of the average fft.\n n = len(omega)\n argmax = np.argmax(np.abs(ut_average))\n [index_x, index_y, index_z] = np.unravel_index(argmax, (n, n, n))\n\n # We then use those indices to get the peak frequency.\n return (omega[index_x], omega[index_y], omega[index_z])", "def get_gaussian_ff_top(self, filenames):\n amber_ffs = []\n for fname in filenames:\n amber_ffs.append(self._get_gaussian_ff_top_single(filename=fname))\n return amber_ffs", "def average_gradients(tower_grads):\n\taverage_grads = []\n\tfor grad_and_vars in zip(*tower_grads):\n\t\t# Note that each grad_and_vars looks like the following:\n\t\t# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n\t\tgrads = []\n\t\tfor g, _ in grad_and_vars:\n\t\t\t# Add 0 dimension to the gradients to represent the tower.\n\t\t\texpanded_g = tf.expand_dims(g, 0)\n\n\t\t\t# Append on a 'tower' dimension which we will average over below.\n\t\t\tgrads.append(expanded_g)\n\t\t\n\t\t# Average over the 'tower' dimension.\n\t\tgrad = tf.concat(axis=0, values=grads)\n\t\tgrad = tf.reduce_mean(grad, 0)\n\n\t\t# Keep in mind that the Variables are redundant because they are shared\n\t\t# across towers. So .. we will just return the first tower's pointer to\n\t\t# the Variable.\n\t\tv = grad_and_vars[0][1]\n\t\tgrad_and_var = (grad, v)\n\t\taverage_grads.append(grad_and_var)\n\treturn average_grads", "def average_gradients(grads):\n average_grads = []\n for grad_and_vars in zip(*grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(0, grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads", "def rough_frequency_samples(m1, m2, flow, fmax, df_min):\n kmin = int(flow / df_min)\n kmax = int(fmax / df_min)\n k = kmin\n ksamples = []\n while k < kmax:\n ksamples.append(k)\n k += int(1.0 / rough_time_estimate(m1, m2, k * df_min) / df_min)\n ksamples.append(kmax)\n return numpy.array(ksamples)", "def fsa_simple(self,var,**kwargs):\n favg=np.zeros(self.mesh.psi_surf.size)\n for i in range(0,self.mesh.psi_surf.size):\n s1=0\n s2=0\n for j in range(0,self.mesh.surf_len[i]):\n idx=self.mesh.surf_idx[i,j] - 1\n s1=s1+var[idx]*self.mesh.node_vol[i]\n s2=s2+self.mesh.node_vol[i]\n favg[i]=s1/s2\n return favg", "def get_amplitude_map(self, timeWindow=(0, 0.5)):\n\n windowIndex = np.logical_and(self.time>=timeWindow[0], self.time<=timeWindow[1])\n\n indON,indOFF,allAltPos,allAziPos = self._sort_index()\n\n ampON = np.zeros(indON.shape); ampON[:]=np.nan; ampOFF = ampON.copy()\n\n for i in np.ndindex(indON.shape):\n traceIndON = indON[i]; traceIndOFF = indOFF[i]\n if traceIndON is not None: ampON[i] = np.mean(np.mean(self.data[traceIndON]['traces'],axis=0)[windowIndex])\n if traceIndOFF is not None: ampOFF[i] = np.mean(np.mean(self.data[traceIndOFF]['traces'],axis=0)[windowIndex])\n\n return ampON, ampOFF, allAltPos, allAziPos", "def neighbor_average_waveform(waveforms, neighbors, lwt):\n n_neighbors = neighbors.shape[0]\n sum_ = waveforms * lwt\n n = np.full(waveforms.shape, lwt, dtype=np.int32)\n for i in prange(n_neighbors):\n pixel = neighbors[i, 0]\n neighbor = neighbors[i, 1]\n sum_[pixel] += waveforms[neighbor]\n n[pixel] += 1\n return sum_ / n", "def _average_gradients(self, tower_grads):\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # If no gradient for a variable, exclude it from output\n if grad_and_vars[0][0] is None:\n continue\n\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(grads, 0)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n\n return average_grads", "def harvest_engrad(engrad):\n try:\n lines = open(engrad).readlines()\n except IOError:\n return []\n num_atoms = int(lines[3].strip())\n energy = lines[7].strip()\n grad = []\n for i in range(12, 13 + num_atoms*3, 3):\n grad.append(list(map(float, lines[i:i + 3])))\n return grad", "def avg_grads(tower_grads):\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... 
, (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(grads, 0)\n grad = tf.reduce_mean(input_tensor=grad, axis=0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads" ]
[ "0.7237736", "0.5336351", "0.53191733", "0.5311113", "0.52894306", "0.5208346", "0.5117886", "0.5105985", "0.50825256", "0.5041957", "0.4994894", "0.49861515", "0.49033117", "0.48573145", "0.48243278", "0.48106575", "0.4807822", "0.47676003", "0.4739023", "0.47271356", "0.47114715", "0.470455", "0.46933037", "0.46865195", "0.4659219", "0.46554795", "0.46553335", "0.4623883", "0.460616", "0.46022913", "0.46004838", "0.45952803", "0.45926473", "0.45897472", "0.4586358", "0.45794448", "0.45792228", "0.4568704", "0.45681202", "0.4562965", "0.45511946", "0.45468065", "0.4530452", "0.45243287", "0.45138726", "0.45119843", "0.45066077", "0.4505723", "0.4495511", "0.4476094", "0.44740495", "0.44697607", "0.4443156", "0.44420657", "0.44372106", "0.44364327", "0.44316328", "0.4429043", "0.44235995", "0.44198442", "0.44197738", "0.4418764", "0.4408131", "0.4408131", "0.44071537", "0.4406031", "0.44003564", "0.43978775", "0.43972033", "0.4397014", "0.4388405", "0.43874592", "0.4381635", "0.43780252", "0.43727016", "0.43719608", "0.4365798", "0.43657127", "0.43656263", "0.43650037", "0.4361444", "0.4357533", "0.43542916", "0.4353419", "0.43498346", "0.43414035", "0.4341323", "0.43369347", "0.4334821", "0.43339676", "0.4333671", "0.4331911", "0.43307716", "0.43232515", "0.43168986", "0.4306948", "0.43060082", "0.43018946", "0.4289588", "0.42886034" ]
0.71738416
1
Converts Equatorial coordinates to Galactic coordinates
def eq2gal(ra, dec):
    gal=ephem.Galactic(ephem.Equatorial(ra, dec))
    gl=180.0*gal.long.real/math.pi
    gb=180.0*gal.lat.real/math.pi
    return (gl, gb)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def EquatorialToGalactic(Equatorial):\n \n # ra, dec, s => l,b,s\n ra = Equatorial[:,0]\n dec = Equatorial[:,1]\n s = Equatorial[:,2]\n cd = np.cos(dec)\n sd = np.sin(dec)\n b = np.arcsin(np.sin(decgp)*sd+np.cos(decgp)*cd*np.cos(ra-ragp))\n l = lcp-np.arctan2(cd*np.sin(ra-ragp),np.cos(decgp)*sd-np.sin(decgp)*cd*np.cos(ra-ragp))\n l[l<0] += 2.*np.pi; \n if(len(Equatorial[0,:])==3):\n Galactic = np.column_stack((l,b,s))\n else:\n # vlos, muracos(dec), mudec => vlos, mulcosb, mub\n vlos = Equatorial[:,3]\n muracosd = Equatorial[:,4]\n mudec = Equatorial[:,5]\n cb = np.cos(b)\n sb = np.sin(b)\n A11 = (np.sin(decgp)*cd-np.cos(decgp)*sd*np.cos(ra-ragp))/cb\n A12 = -np.cos(decgp)*np.sin(ra-ragp)/cb\n A21 = (np.cos(decgp)*cd+np.sin(decgp)*sd*np.cos(ra-ragp)+sb*np.cos(lcp-l)*A11)/np.sin(lcp-l)\n A22 = (np.sin(decgp)*np.sin(ra-ragp)+sb*np.cos(lcp-l)*A12)/np.sin(lcp-l) \n index = np.where(np.fabs(np.cos(lcp-l)) > np.fabs(np.sin(lcp-l)))\n A21[index] = (sd[index]*np.sin(ra[index]-ragp)-sb[index]*np.sin(lcp-l[index])*A11[index])/np.cos(lcp-l[index])\n A22[index] = -(np.cos(ra[index]-ragp)+sb[index]*np.sin(lcp-l[index])*A12[index])/np.cos(lcp-l[index])\n \n Galactic = np.column_stack((l,b,s,vlos,A21*mudec+A22*muracosd,A11*mudec+A12*muracosd))\n \n return Galactic", "def GalacticToEquatorial(Galactic):\n \n # l,b,s => ra, dec, s\n l = Galactic[:,0]\n b = Galactic[:,1]\n cb = np.cos(b)\n sb = np.sin(b)\n dec = np.arcsin(np.cos(decgp)*cb*np.cos(l-lcp)+sb*np.sin(decgp))\n ra = ragp+np.arctan2(cb*np.sin(lcp-l),sb*np.cos(decgp)-cb*np.sin(decgp)*np.cos(l-lcp))\n ra[ra>2.*np.pi] -= 2.*np.pi\n if (len(Galactic[0,:])==3):\n Equatorial = np.column_stack([ra,dec,Galactic[:,2]])\n else:\n # vlos, mulcos(b), mub => vlos, muracos(dec), mudec\n cd = np.cos(dec)\n sd = np.sin(dec)\n A11 = (np.sin(decgp)*cd-np.cos(decgp)*sd*np.cos(ra-ragp))/cb\n A12 = -np.cos(decgp)*np.sin(ra-ragp)/cb\n A21 = (np.cos(decgp)*cd+np.sin(decgp)*sd*np.cos(ra-ragp)+sb*np.cos(lcp-l)*A11)/np.sin(lcp-l)\n A22 = (np.sin(decgp)*np.sin(ra-ragp)+sb*np.cos(lcp-l)*A12)/np.sin(lcp-l)\n index = np.where(np.fabs(np.cos(lcp-l))>np.fabs(np.sin(lcp-l)))\n A21[index] = (sd[index]*np.sin(ra[index]-ragp)-sb[index]*np.sin(lcp-l[index])*A11[index])/np.cos(lcp-l[index])\n A22[index] =-(np.cos(ra[index]-ragp)+sb[index]*np.sin(lcp-l[index])*A12[index])/np.cos(lcp-l[index])\n Prod = A11*A22-A12*A21\n Equatorial = np.column_stack((ra,dec,Galactic[:,2],Galactic[:,3],\n (A11*Galactic[:,4]-A21*Galactic[:,5])/Prod,\n (A22*Galactic[:,5]-A12*Galactic[:,4])/Prod))\n \n return Equatorial", "def CartesianToGalactic(Cartesian,SolarPosition): \n\t \n # x,y,z->l,b,s\n tmp1 = SolarPosition[0]-Cartesian[:,0]\n tmp2 = Cartesian[:,1]\n tmp3 = Cartesian[:,2]-SolarPosition[1]\n s = np.sqrt(tmp1*tmp1+tmp2*tmp2+tmp3*tmp3)\n l = np.arctan2(tmp2,tmp1)\n b = np.arcsin(tmp3/s)\n l[l<0.] 
+= 2.*np.pi; \n\n if(len(Cartesian[0,:])==3):\n Galactic = np.column_stack((l,b,s))\n else:\n \t # vx,vy,vz -> vlos,mu_lcos(b),mu_b\n vx = -Cartesian[:,3]-SolarPosition[2]\n vy = Cartesian[:,4]-SolarPosition[3]\n vz = Cartesian[:,5]-SolarPosition[4]\n cl = np.cos(l)\n sl = np.sin(l)\n cb = np.cos(b)\n sb = np.sin(b)\n vlos = vx*cl*cb+vy*sl*cb+vz*sb;\n mul = (-vx*sl+vy*cl)/(pm2vel*s)\n mub = (-vx*cl*sb-vy*sl*sb+vz*cb)/(pm2vel*s)\n Galactic = np.column_stack((l,b,s,vlos,mul,mub))\n \n return Galactic", "def gal2equ(ll, bb):\n ll, bb = map(radians, (ll, bb))\n ra_gp = radians(192.85948)\n de_gp = radians(27.12825)\n lcp = radians(122.932)\n sin_d = sin(de_gp) * sin(bb) + cos(de_gp) * cos(bb) * cos(lcp - ll)\n ramragp = (arctan2(cos(bb) * sin(lcp - ll),\n cos(de_gp) * sin(bb) - sin(de_gp) *\n cos(bb) * cos(lcp - ll)))\n dec = arcsin(sin_d)\n ra = (ramragp + ra_gp + 2 * pi) % (2 * pi)\n ra = ra % 360\n return degrees(ra), degrees(dec)", "def transform_to_galactic(icrs_coords):\n\n galactic_coords = icrs_coords.transform_to(Galactic())\n sun_motion = CartesianDifferential(_Usun, _vc+_Vsun, _Wsun)\n galactocentric_cartesian = icrs_coords.transform_to(Galactocentric(galcen_distance=_Rsun, z_sun=_zsun, galcen_v_sun=sun_motion))\n galactocentric_cartesian.set_representation_cls(base='cartesian')\n galactocentric_cylindrical = icrs_coords.transform_to(Galactocentric(galcen_distance=_Rsun, z_sun=_zsun, galcen_v_sun=sun_motion))\n galactocentric_cylindrical.set_representation_cls(base='cylindrical')\n\n return galactic_coords, galactocentric_cartesian, galactocentric_cylindrical", "def gal2equ(l, b):\n rmtx = np.matrix([[-0.054875539726, 0.494109453312, -0.867666135858],\n [-0.873437108010, -0.444829589425, -0.198076386122],\n [-0.483834985808, 0.746982251810, 0.455983795705]])\n cosl = np.cos(l * degree)\n sinl = np.sin(l * degree)\n cosb = np.cos(b * degree)\n sinb = np.sin(b * degree)\n gvec = np.matrix([[cosl * cosb], [sinl * cosb], [sinb]])\n cvec = rmtx * gvec\n\n x, y, z = (cvec.item(0), cvec.item(1), cvec.item(2))\n r = np.sqrt(x * x + y * y)\n ra = 0\n dec = 0\n if r != 0.:\n ra = np.arctan2(y, x) / degree\n if ra < 0:\n ra += 360.\n if z != 0:\n dec = np.arctan2(z, r) / degree\n return (ra, dec)", "def galactic_latlon(self):\n vector = _GALACTIC.dot(self.position.au)\n d, lat, lon = to_polar(vector)\n return (Angle(radians=lat, signed=True),\n Angle(radians=lon),\n Distance(au=d))", "def grid_to_geodetic(self, northing, easting):\n\n ξ = (northing - self.fn) / (self.k0 * self.â)\n η = (easting - self.fe) / (self.k0 * self.â)\n\n ξ_prim = ξ -\\\n self.δ1 * math.sin(2 * ξ) * math.cosh(2 * η) -\\\n self.δ2 * math.sin(4 * ξ) * math.cosh(4 * η) -\\\n self.δ3 * math.sin(6 * ξ) * math.cosh(6 * η) -\\\n self.δ4 * math.sin(8 * ξ) * math.cosh(8 * η)\n\n η_prim = η -\\\n self.δ1 * math.cos(2 * ξ) * math.sinh(2 * η) -\\\n self.δ2 * math.cos(4 * ξ) * math.sinh(4 * η) -\\\n self.δ3 * math.cos(6 * ξ) * math.sinh(6 * η) -\\\n self.δ4 * math.cos(8 * ξ) * math.sinh(8 * η)\n\n φ_star = math.asin(math.sin(ξ_prim) / math.cosh(η_prim))\n δλ = math.atan(math.sinh(η_prim) / math.cos(ξ_prim))\n\n λ = self.λ0 + δλ\n φ = φ_star + math.sin(φ_star) * math.cos(φ_star) * (self.A_star +\n self.B_star * math.sin(φ_star) ** 2 +\n self.C_star * math.sin(φ_star) ** 4 +\n self.D_star * math.sin(φ_star) ** 6)\n\n return math.degrees(φ), math.degrees(λ)", "def geo2desiredENU(self, lat, lon, h):\n\t\tlat0 = self.origin[0]\n\t\tlon0 = self.origin[1]\n\t\tx,y,z = pm.geodetic2enu(lat, lon, h, lat0, lon0, self.h0)\n\n\t\tx_L = 
cos(self.local_rot)*x + sin(self.local_rot)*y\n\t\ty_L = -1*sin(self.local_rot)*x + cos(self.local_rot)*y\n\n\t\tz = self.curr_z_enu - self.GND_ALT\n\t\treturn x_L, y_L, z", "def geodetic2ecef(self,lla, wantDeg=False):\n lat,lon,alt = lla\n\n if wantDeg:\n lat = radians(lat)\n lon = radians(lon)\n\n xi = sqrt(1 - self.esq * sin(lat)*sin(lat))\n x = (self.a / xi + alt) * cos(lat) * cos(lon)\n y = (self.a / xi + alt) * cos(lat) * sin(lon)\n z = (self.a / xi * (1-self.esq) + alt) * sin(lat)\n\n return np.array([x, y, z])", "def GalacticToCartesian(Galactic,SolarPosition): \n \n # l,b,s->x,y,z\n cl = np.cos(Galactic[:,0])\n sl = np.sin(Galactic[:,0])\n cb = np.cos(Galactic[:,1])\n sb = np.sin(Galactic[:,1])\n x = SolarPosition[0]-Galactic[:,2]*cb*cl\n y = Galactic[:,2]*cb*sl\n z = Galactic[:,2]*sb+SolarPosition[1]\n\n if(len(Galactic[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vlos,mu_lcos(b),mu_b -> vx,vy,vz\n vl = pm2vel*Galactic[:,2]*Galactic[:,4]\n vb = pm2vel*Galactic[:,2]*Galactic[:,5]\n tmp2 = cb*Galactic[:,3]-sb*vb\n vx = cl*tmp2-sl*vl+SolarPosition[2]\n vy = sl*tmp2+cl*vl+SolarPosition[3]\n vz = sb*Galactic[:,3]+cb*vb+SolarPosition[4]\n Cartesian = np.column_stack((x,y,z,-vx,vy,vz))\n \n return Cartesian", "def ecef2geodetic(self, ecef, wantDeg=False):\n x,y,z = ecef\n\n r = sqrt(x*x + y*y)\n Esq = self.a*self.a - self.b*self.b\n F = 54 * self.b*self.b * z*z\n G = r*r + (1-self.esq) * z*z - self.esq*Esq\n C = (self.esq*self.esq * F * r*r) / (G**3)\n S = np.cbrt(1 + C + sqrt(C*C + 2*C))\n P = F / (3 * (S + 1 / S + 1)**2 * G*G)\n Q = np.sqrt(1 + 2 * self.esq*self.esq * P)\n r_0 = -(P * self.esq * r) / (1+Q) + np.sqrt(0.5 * self.a*self.a*(1 + 1.0 / Q) - \\\n P * (1-self.esq) * z*z / (Q * (1+Q)) - 0.5 * P * r*r)\n t = (r - self.esq * r_0)**2\n U = np.sqrt(t + z*z)\n V = np.sqrt(t + (1-self.esq) * z*z)\n Z_0 = self.b*self.b * z / (self.a * V)\n alt = U * (1 - self.b*self.b / (self.a * V))\n lat = arctan((z + self.e1sq * Z_0) / r)\n lon = arctan2(y, x)\n\n if wantDeg:\n lat = degrees(lat)\n lon = degrees(lon)\n\n return np.array([lat, lon, alt])", "def geocentric2geodetic(latitude):\n\n return np.rad2deg(np.arctan(1.0067395 * np.tan(np.deg2rad(latitude))))", "def radec_to_galactic_astropy(coords):\n ra_hms, dec_hms = Angle(coords[0]), Angle(coords[1])\n radec_coords_deg = SkyCoord(ra=ra_hms, dec=dec_hms, frame='icrs')\n galactic_coords_str = radec_coords_deg.transform_to('galactic').to_string()\n galactic_coords_degs = [float(coord) for coord in galactic_coords_str.split(' ')]\n return galactic_coords_degs", "def radec_to_galactic(coords):\n\n def gross_coords_to_rads(coords):\n ra, dec = coords\n coords = SkyCoord(ra=ra, dec=dec, frame='icrs')\n ra_rad, dec_rad = [float(a) * np.pi/180\n for a in coords.to_string().split()]\n return (ra_rad, dec_rad)\n\n ra, dec = gross_coords_to_rads(coords)\n ra_NGP, dec_NGP = gross_coords_to_rads(['12h51m26.00s', '+27d 7m 42.0s'])\n l_NCP = 122.93 * np.pi/180\n\n b = np.arcsin(np.sin(dec_NGP) * np.sin(dec) \\\n + np.cos(dec_NGP) * np.cos(dec) \\\n * np.cos(ra - ra_NGP))\n\n x1 = np.cos(dec) * np.sin(ra - ra_NGP)\n x2 = np.cos(dec_NGP) * np.sin(dec) \\\n - np.sin(dec_NGP) * np.cos(dec) * np.cos(ra - ra_NGP)\n\n # Arctan2 is basically a smart version of arctan(x1/x2)\n l = l_NCP - np.arctan2(x1, x2)\n\n # Convert to degrees and round out to 4 decs for prettiness.\n l, b = round(l * 180/np.pi, 4), round(b * 180/np.pi, 4)\n return [l, b]", "def galactic_position(self):\n vector = _GALACTIC.dot(self.position.au)\n return Distance(vector)", 
"def equ2gal(ra, dec):\n ra, dec = map(radians, (ra, dec))\n ra_gp = radians(192.85948)\n de_gp = radians(27.12825)\n lcp = radians(122.932)\n sin_b = (sin(de_gp) * sin(dec) + cos(de_gp) *\n cos(dec) * cos(ra - ra_gp))\n lcpml = arctan2(cos(dec) * sin(ra - ra_gp),\n cos(de_gp) * sin(dec) - sin(de_gp) *\n cos(dec) * cos(ra - ra_gp))\n bb = arcsin(sin_b)\n ll = (lcp - lcpml + 2 * pi) % (2 * pi)\n ll = ll % 360\n return degrees(ll), degrees(bb)", "def geodetic_to_grid(self, latitude, longitude):\n\n φ = math.radians(latitude)\n λ = math.radians(longitude)\n\n φ_star = φ - math.sin(φ) * math.cos(φ) * (self.A +\n self.B * math.sin(φ) ** 2 +\n self.C * math.sin(φ) ** 4 +\n self.D * math.sin(φ) ** 6)\n\n δλ = λ - self.λ0\n ξ_prim = math.atan(math.tan(φ_star) / math.cos(δλ))\n η_prim = math.atanh(math.cos(φ_star) * math.sin(δλ))\n\n x = self.k0 * self.â * (ξ_prim +\n self.β1 * math.sin(2 * ξ_prim) * math.cosh(2 * η_prim) +\n self.β2 * math.sin(4 * ξ_prim) * math.cosh(4 * η_prim) +\n self.β3 * math.sin(6 * ξ_prim) * math.cosh(6 * η_prim) +\n self.β4 * math.sin(8 * ξ_prim) * math.cosh(8 * η_prim)) + self.fn\n\n y = self.k0 * self.â * (η_prim +\n self.β1 * math.cos(2 * ξ_prim) * math.sinh(2 * η_prim) +\n self.β2 * math.cos(4 * ξ_prim) * math.sinh(4 * η_prim) +\n self.β3 * math.cos(6 * ξ_prim) * math.sinh(6 * η_prim) +\n self.β4 * math.cos(8 * ξ_prim) * math.sinh(8 * η_prim)) + self.fe\n\n return x, y", "def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)", "def equ2gal(ra, dec):\n rmtx = np.matrix([[-0.054875539726, 0.494109453312, -0.867666135858],\n [-0.873437108010, -0.444829589425, -0.198076386122],\n [-0.483834985808, 0.746982251810, 0.455983795705]])\n cosr = np.cos(ra * degree)\n sinr = np.sin(ra * degree)\n cosd = np.cos(dec * degree)\n sind = np.sin(dec * degree)\n evec = np.matrix([[cosr * cosd], [sinr * cosd], [sind]])\n gvec = rmtx.transpose() * evec\n\n x, y, z = (gvec.item(0), gvec.item(1), gvec.item(2))\n r = np.sqrt(x * x + y * y)\n l = 0.\n b = 0.\n if r != 0.:\n l = np.arctan2(y, x) / degree\n if l < 0:\n l += 360.\n if z != 0:\n b = np.arctan2(z, r) / degree\n return (l, b)", "def desiredENU2geo(self, x_L, y_L, z):\n\t\tx = cos(self.local_rot)*x_L - sin(self.local_rot)*y_L\n\t\ty = sin(self.local_rot)*x_L + cos(self.local_rot)*y_L\n\n\t\tlat0 = self.origin[0]\n\t\tlon0 = self.origin[1]\n\n\t\tlat, lon, alt = pm.enu2geodetic(x, y, z, lat0, lon0, self.h0)\n\t\treturn lat, lon, alt", "def geo2Cartesian(lat, lon, h, julian_date):\n\n lat_rad = np.radians(lat)\n lon_rad = np.radians(lon)\n\n # Calculate ECEF coordinates\n ecef_x, ecef_y, ecef_z = latLonAlt2ECEF(lat_rad, lon_rad, h)\n\n\n # Get Local Sidreal Time\n LST_rad = math.radians(JD2LST(julian_date, np.degrees(lon_rad))[0])\n\n\n # Calculate the Earth radius at given latitude\n Rh = math.sqrt(ecef_x**2 + ecef_y**2 + ecef_z**2)\n\n # Calculate the geocentric latitude (latitude which considers the Earth as an elipsoid)\n lat_geocentric = math.atan2(ecef_z, math.sqrt(ecef_x**2 + ecef_y**2))\n\n # Calculate Cartesian ECI coordinates (in meters), in the epoch of date\n x = Rh*np.cos(lat_geocentric)*np.cos(LST_rad)\n y = Rh*np.cos(lat_geocentric)*np.sin(LST_rad)\n z = Rh*np.sin(lat_geocentric)\n\n return x, y, z", "def MS_to_galactic():\n return matrix_transpose(MS_MATRIX)", "def ToMercDegrees(y, num_tiles):\n # Calculate on standard Mercator scale that spans from -pi to 
pi.\n # There is no intrinsic reason for using these values, which correspond to\n # about -85 to 85 degrees, other than it matches (albeit somewhat\n # misleadingly) the longitudinal radian span, and it's the span Google\n # uses for its 2d maps.\n y_merc = 2.0 * math.pi * y / num_tiles - math.pi\n latitude_rad = (math.atan(math.exp(y_merc)) - math.pi / 4.0) * 2.0\n return latitude_rad / math.pi * 180.0", "def coords_to_gps(self,coords):\n return ((self.max_lat - (self.lat_step * (0.5+coords[0]))),(self.min_lon + (self.lon_step * (0.5+coords[1]))))", "def scalar_r2g(al, be, ga, rlon, rlat):\n\n rad = mt.pi / 180\n al = al * rad\n be = be * rad\n ga = ga * rad\n rotate_matrix = np.zeros(shape=(3, 3))\n rotate_matrix[0, 0] = np.cos(ga) * np.cos(al) - np.sin(ga) * np.cos(be) * np.sin(al)\n rotate_matrix[0, 1] = np.cos(ga) * np.sin(al) + np.sin(ga) * np.cos(be) * np.cos(al)\n rotate_matrix[0, 2] = np.sin(ga) * np.sin(be)\n rotate_matrix[1, 0] = -np.sin(ga) * np.cos(al) - np.cos(ga) * np.cos(be) * np.sin(\n al\n )\n rotate_matrix[1, 1] = -np.sin(ga) * np.sin(al) + np.cos(ga) * np.cos(be) * np.cos(\n al\n )\n rotate_matrix[1, 2] = np.cos(ga) * np.sin(be)\n rotate_matrix[2, 0] = np.sin(be) * np.sin(al)\n rotate_matrix[2, 1] = -np.sin(be) * np.cos(al)\n rotate_matrix[2, 2] = np.cos(be)\n\n rotate_matrix = np.linalg.pinv(rotate_matrix)\n\n rlat = rlat * rad\n rlon = rlon * rad\n\n # Rotated Cartesian coordinates:\n xr = np.cos(rlat) * np.cos(rlon)\n yr = np.cos(rlat) * np.sin(rlon)\n zr = np.sin(rlat)\n\n # Geographical Cartesian coordinates:\n xg = rotate_matrix[0, 0] * xr + rotate_matrix[0, 1] * yr + rotate_matrix[0, 2] * zr\n yg = rotate_matrix[1, 0] * xr + rotate_matrix[1, 1] * yr + rotate_matrix[1, 2] * zr\n zg = (\n rotate_matrix[2, 0] * xr + rotate_matrix[2, 1] * yr + rotate_matrix[2, 2] * zr\n )\n\n # Geographical coordinates:\n lat = np.arcsin(zg)\n lon = np.arctan2(yg, xg)\n\n a = np.where((np.abs(xg) + np.abs(yg)) == 0)\n if a:\n lon[a] = 0\n\n lat = lat / rad\n lon = lon / rad\n\n return (lon, lat)", "def distance_to_galactic_center(self):\n l, b = self.galactic_coords\n h_star_gcp = self.distance * np.sin(b)\n d_star_sun = self.distance * np.cos(b)\n d_star_gc = np.sqrt(d_star_sun**2 + d_sun_GC**2 - 2*d_star_sun*d_sun_GC*np.cos(l))\n return d_star_gc", "def pm_eq2gal(self, long_in=\"ra\", lat_in=\"dec\", pm_long=\"pm_ra\", pm_lat=\"pm_dec\", pm_long_out=\"pm_l\", pm_lat_out=\"pm_b\",\n name_prefix=\"__proper_motion_eq2gal\",\n right_ascension_galactic_pole=192.85,\n declination_galactic_pole=27.12,\n propagate_uncertainties=False,\n radians=False, inverse=False,\n inplace=False):\n \"\"\"mu_gb = mu_dec*(cdec*sdp-sdec*cdp*COS(ras))/cgb $\n - mu_ra*cdp*SIN(ras)/cgb\"\"\"\n df = self.df if inplace else self.df.copy()\n long_in_original = long_in = df._expr(long_in)\n lat_in_original = lat_in = df._expr(lat_in)\n pm_long = df._expr(pm_long)\n pm_lat = df._expr(pm_lat)\n if not radians:\n long_in = long_in * np.pi/180\n lat_in = lat_in * np.pi/180\n c1_name = name_prefix + \"_C1\"\n c2_name = name_prefix + \"_C2\"\n right_ascension_galactic_pole = math.radians(right_ascension_galactic_pole)\n declination_galactic_pole = math.radians(declination_galactic_pole)\n df[c1_name] = c1 = np.sin(declination_galactic_pole) * np.cos(lat_in) - np.cos(declination_galactic_pole)*np.sin(lat_in)*np.cos(long_in-right_ascension_galactic_pole)\n df[c2_name] = c2 = np.cos(declination_galactic_pole) * np.sin(long_in - right_ascension_galactic_pole)\n c1 = df[c1_name]\n c2 = df[c2_name]\n if inverse:\n 
df[pm_long_out] = ( c1 * pm_long + -c2 * pm_lat)/np.sqrt(c1**2+c2**2)\n df[pm_lat_out] = ( c2 * pm_long + c1 * pm_lat)/np.sqrt(c1**2+c2**2)\n else:\n df[pm_long_out] = ( c1 * pm_long + c2 * pm_lat)/np.sqrt(c1**2+c2**2)\n df[pm_lat_out] = (-c2 * pm_long + c1 * pm_lat)/np.sqrt(c1**2+c2**2)\n if propagate_uncertainties:\n df.propagate_uncertainties([df[pm_long_out], df[pm_lat_out]])\n return df", "def g(lat, z) :\n return (g0(lat) - (3.085462 * 1.e-4 + 2.27 * 1.e-7 * np.cos(2*lat*np.pi/180.)) * z\n + (7.254 * 1e-11 + 1e-13 * np.cos(2*lat*np.pi/180.)) * z**2\n - (1.517 * 1e-17 + 6 * 1e-20 * np.cos(2*lat*np.pi/180.)) * z**3)", "def hp2gona(hp):\n return GONAngle(hp2gon(hp))", "def convert(self, lat, lon):\r\n a = self.a\r\n b = self.b\r\n long0 = self.long0\r\n k0 = self.k0\r\n dx = self.dx\r\n\r\n e = (1 - b ** 2 / a ** 2) ** 0.5\r\n e2 = e ** 2 / (1 - e ** 2)\r\n n = (a - b) / (a + b)\r\n nu = a / (1 - (e ** 2) * (sin(lat) ** 2)) ** 0.5\r\n p = lon - long0\r\n\r\n A = a * (1 - n + (5 / 4.0) * (n ** 2 - n ** 3) + (81 / 64.0)*(n ** 4 - n ** 5))\r\n B = (3 * a * n / 2.0) * (1 - n + (7 / 8.0) * (n ** 2 - n ** 3) + (55 / 64.0) * (n ** 4 - n ** 5))\r\n C = (15 * a * (n ** 2) / 16.0) * (1 - n + (3 / 4.0) * (n ** 2 - n ** 3))\r\n D = (35 * a * (n ** 3) / 48.0) * (1 - n + (11 / 16.0) * (n ** 2 - n ** 3))\r\n E = (315 * a * (n ** 4) / 51.0) * (1 - n)\r\n\r\n S = A * lat - B * sin(2 * lat) + C * sin(4 * lat) - D * sin(6 * lat) + E * sin(8 * lat)\r\n\r\n K1 = S * k0\r\n K2 = k0 * nu * sin(2 * lat)/4.0\r\n K3 = (k0 * nu * sin(lat) * (cos(lat) ** 3) / 24.0) * \\\r\n (5 - tan(lat) ** 2 + 9 * e2 * (cos(lat) ** 2) + 4 * (e2 ** 2) * (cos(lat) ** 4))\r\n\r\n y = K1 + K2 * (p ** 2) + K3 * (p ** 4)\r\n\r\n K4 = k0 * nu * cos(lat)\r\n K5 = (k0 * nu * (cos(lat) ** 3) / 6.0) * (1 - tan(lat) ** 2 + e2 * (cos(lat) ** 2))\r\n\r\n x = K4 * p + K5 * (p ** 3) + dx\r\n return x, y", "def cartesian_to_geographical(coordinate_triples):\n if len(coordinate_triples.shape) == 1:\n x = coordinate_triples[0]\n y = coordinate_triples[1]\n z = coordinate_triples[2]\n elif len(coordinate_triples.shape) == 2:\n assert coordinate_triples.shape[1] == 3\n x = coordinate_triples[:, 0]\n y = coordinate_triples[:, 1]\n z = coordinate_triples[:, 2]\n radius = np.sqrt(x**2 + y**2 + z**2)\n longitudes = np.arctan2(y, x)\n latitudes = np.arcsin(z/radius)\n return (latitudes, longitudes)", "def create_pseudo_epsg4326_coordinates(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['lat'] = 180*(pi/2 - np.arccos(self.df_attributes['coord_z']))/pi\n self.df_attributes['lon'] = 180*np.arctan2(self.df_attributes['coord_y'], self.df_attributes['coord_x'])/pi", "def _geodetic_to_cartesian(cls, lat, lon, alt):\n C = Earth.r / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n S = Earth.r * (1 - Earth.e ** 2) / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n r_d = (C + alt) * np.cos(lat)\n r_k = (S + alt) * np.sin(lat)\n\n norm = np.sqrt(r_d ** 2 + r_k ** 2)\n return norm * np.array(\n [np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat)]\n )", "def PolarToGalactic(Polar,SolarPosition): \n \n Cartesian = PolarToCartesian(Polar)\n Galactic = CartesianToGalactic(Cartesian,SolarPosition)\n \n return Galactic", "def eq2gal(self, long_in=\"ra\", lat_in=\"dec\", long_out=\"l\", lat_out=\"b\", name_prefix=\"__celestial_eq2gal\", radians=False, inplace=False):\n return self._trans(long_in, lat_in, long_out, lat_out, name_prefix=name_prefix, radians=radians, _matrix='eq2gal', inplace=inplace)", "def get_cartesian_coord(lat, lon, h):\n 
a = 6378137.0\n rf = 298.257223563\n lat_rad = radians(lat)\n lon_rad = radians(lon)\n N = sqrt(a / (1 - (1 - (1 - 1 / rf) ** 2) * (sin(lat_rad)) ** 2))\n X = (N + h) * cos(lat_rad) * cos(lon_rad)\n Y = (N + h) * cos(lat_rad) * sin(lon_rad)\n Z = ((1 - 1 / rf) ** 2 * N + h) * sin(lat_rad)\n return X, Y, Z", "def convert_voe_coords_to_eqposn(c):\n if (c.system != voeventparse.definitions.sky_coord_system.utc_fk5_geo\n or c.units != 'deg'):\n raise ValueError(\"Unrecognised Coords type: %s, %s\" % (c.system, c.units))\n return Equatorial(c.ra/DEG_PER_RADIAN,c.dec/DEG_PER_RADIAN, epoch=J2000)", "def g0(lat):\n return (980.6160 * (1. - 0.0026372 * np.cos(2*lat*np.pi/180.)\n + 0.0000059 * np.cos(2*lat*np.pi/180.)**2))", "def scalar_g2r(al, be, ga, lon, lat):\n\n rad = mt.pi / 180\n al = al * rad\n be = be * rad\n ga = ga * rad\n\n rotate_matrix = np.zeros(shape=(3, 3))\n\n rotate_matrix[0, 0] = np.cos(ga) * np.cos(al) - np.sin(ga) * np.cos(be) * np.sin(al)\n rotate_matrix[0, 1] = np.cos(ga) * np.sin(al) + np.sin(ga) * np.cos(be) * np.cos(al)\n rotate_matrix[0, 2] = np.sin(ga) * np.sin(be)\n rotate_matrix[1, 0] = -np.sin(ga) * np.cos(al) - np.cos(ga) * np.cos(be) * np.sin(\n al\n )\n rotate_matrix[1, 1] = -np.sin(ga) * np.sin(al) + np.cos(ga) * np.cos(be) * np.cos(\n al\n )\n rotate_matrix[1, 2] = np.cos(ga) * np.sin(be)\n rotate_matrix[2, 0] = np.sin(be) * np.sin(al)\n rotate_matrix[2, 1] = -np.sin(be) * np.cos(al)\n rotate_matrix[2, 2] = np.cos(be)\n \n #rotate_matrix = np.linalg.pinv(rotate_matrix)\n \n lat = lat * rad\n lon = lon * rad\n\n # geographical Cartesian coordinates:\n xr = np.cos(lat) * np.cos(lon)\n yr = np.cos(lat) * np.sin(lon)\n zr = np.sin(lat)\n\n # rotated Cartesian coordinates:\n xg = rotate_matrix[0, 0] * xr + rotate_matrix[0, 1] * yr + rotate_matrix[0, 2] * zr\n yg = rotate_matrix[1, 0] * xr + rotate_matrix[1, 1] * yr + rotate_matrix[1, 2] * zr\n zg = rotate_matrix[2, 0] * xr + rotate_matrix[2, 1] * yr + rotate_matrix[2, 2] * zr\n\n # rotated coordinates:\n rlat = np.arcsin(zg)\n rlon = np.arctan2(yg, xg)\n\n a = np.where((np.abs(xg) + np.abs(yg)) == 0)\n if a:\n lon[a] = 0\n\n rlat = rlat / rad\n rlon = rlon / rad\n\n return (rlon, rlat)", "def ecef_coords(lats: ndarray, lons: ndarray) -> Tuple[ndarray, ndarray, ndarray]:\n # Cartopy Geodetic and Geocentric both default to the WGS84 datum\n spherical_latlon_crs = Geodetic()\n ecef_crs = Geocentric()\n xyz = ecef_crs.transform_points(\n spherical_latlon_crs, np.array(lons), np.array(lats)\n )\n return xyz[..., 0], xyz[..., 1], xyz[..., 2]", "def latlong_to_cartsian():\n source = pyproj.CRS.from_epsg(4326) # WGS84\n destination = pyproj.CRS.from_epsg(8059) # GDA2020 / SA Lambert\n return pyproj.Transformer.from_crs(source, destination).transform", "def fromECEFtoLatLongDegrees(x, y, z):\n ret = fromECEFtoLatLong(x, y, z)\n return math.degrees(ret[0]), math.degrees(ret[1]), ret[2]", "def map_coordinates(self,geometry):\n\t\tg = self.geomatrix\n\t\tdef project_coord(x,y,z=None):\n\t\t\tx = g[0] + g[1] * x + g[2] * y\n\t\t\ty = g[3] + g[4] * x + g[5] * y\n\t\t\tif z is None:\n\t\t\t\treturn x,y\n\t\t\telse:\n\t\t\t\treturn x,y,z\n\t\treturn transform(project_coord, geometry)", "def sphericalToCartesian(altitude=0, azimuth=0, radius=0):\n\n rcos_theta = radius * np.cos(altitude)\n x = rcos_theta * np.cos(azimuth)\n y = rcos_theta * np.sin(azimuth)\n z = radius * np.sin(altitude)\n return x, y, z", "def ECEFToGeodetic(self,x, y, z):\n def cbrt(x):\n if x >= 0:\n return pow(x, 1.0/3.0)\n else:\n return -pow(abs(x), 1.0/3.0)\n 
\n a = 6378137.0#6378.137\n b = 6356752.3142#6356.7523142\n esq = 6.69437999014 * 0.001\n e1sq = 6.73949674228 * 0.001\n f = 1 / 298.257223563 \n\n r = sqrt(x * x + y * y)\n Esq = a * a - b * b\n F = 54 * b * b * z * z\n G = r * r + (1 - esq) * z * z - esq * Esq\n C = (esq * esq * F * r * r) / (pow(G, 3))\n S = cbrt(1 + C + sqrt(C * C + 2 * C))\n P = F / (3 * pow((S + 1 / S + 1), 2) * G * G)\n Q = sqrt(1 + 2 * esq * esq * P)\n r_0 = -(P * esq * r) / (1 + Q) + sqrt(0.5 * a * a*(1 + 1.0 / Q) - \\\n P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)\n U = sqrt(pow((r - esq * r_0), 2) + z * z)\n V = sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z)\n Z_0 = b * b * z / (a * V)\n h = U * (1 - b * b / (a * V))\n lat = arctan((z + e1sq * Z_0) / r)\n lon = arctan2(y, x)\n return degrees(lat), degrees(lon)", "def convertCoord(lon, lat, inEPSG, outEPSG):\n from pyproj import Proj, transform\n inProj = Proj(init='epsg:'+str(inEPSG))\n outProj = Proj(init='epsg:'+str(outEPSG))\n x, y = transform(inProj, outProj, lon, lat)\n return x, y\n # epsg:4326 WGS84\n # epsg:2950 MTM8\n # epsg:6622 Quebec Lambert", "def WGS84toOSGB36(lat, lon):\n\t# First convert to radians\n\t# These are on the wrong ellipsoid currently: GRS80. (Denoted by _1)\n\tlat_1 = lat*pi/180\n\tlon_1 = lon*pi/180\n\t\n\t# Want to convert to the Airy 1830 ellipsoid, which has the following:\n\t# The GSR80 semi-major and semi-minor axes used for WGS84(m)\n\ta_1, b_1 = 6378137.000, 6356752.3141\n\te2_1 = 1 - (b_1*b_1)/(a_1*a_1) # The eccentricity of the GRS80 ellipsoid\n\tnu_1 = a_1/sqrt(1-e2_1*sin(lat_1)**2)\n\t\n\t# First convert to cartesian from spherical polar coordinates\n\tH = 0 # Third spherical coord.\n\tx_1 = (nu_1 + H)*cos(lat_1)*cos(lon_1)\n\ty_1 = (nu_1 + H)*cos(lat_1)*sin(lon_1)\n\tz_1 = ((1-e2_1)*nu_1 + H)*sin(lat_1)\n\t\n\t# Perform Helmut transform (to go between GRS80 (_1) and Airy 1830 (_2))\n\ts = 20.4894*10**-6 # The scale factor -1\n\t# The translations along x,y,z axes respectively\n\ttx, ty, tz = -446.448, 125.157, -542.060\n\t# The rotations along x,y,z respectively, in seconds\n\trxs, rys, rzs = -0.1502, -0.2470, -0.8421\n\t# In radians\n\trx, ry, rz = rxs*pi/(180*3600.), rys*pi/(180*3600.), rzs*pi/(180*3600.)\n\tx_2 = tx + (1+s)*x_1 + (-rz)*y_1 + (ry)*z_1\n\ty_2 = ty + (rz)*x_1 + (1+s)*y_1 + (-rx)*z_1\n\tz_2 = tz + (-ry)*x_1 + (rx)*y_1 + (1+s)*z_1\n\t\n\t# Back to spherical polar coordinates from cartesian\n\t# Need some of the characteristics of the new ellipsoid\n\t# The GSR80 semi-major and semi-minor axes used for WGS84(m)\n\ta, b = 6377563.396, 6356256.909\n\te2 = 1 - (b*b)/(a*a) # The eccentricity of the Airy 1830 ellipsoid\n\tp = sqrt(x_2**2 + y_2**2)\n\t\n\t# Lat is obtained by an iterative proceedure:\n\tlat = atan2(z_2, (p*(1-e2))) # Initial value\n\tlatold = 2*pi\n\twhile abs(lat - latold) > 10**-16:\n\t\tlat, latold = latold, lat\n\t\tnu = a/sqrt(1-e2*sin(latold)**2)\n\t\tlat = atan2(z_2+e2*nu*sin(latold), p)\n\t\t\n\t# Lon and height are then pretty easy\n\tlon = atan2(y_2, x_2)\n\tH = p/cos(lat) - nu\n\t\n\t# E, N are the British national grid coordinates - eastings and northings\n\tF0 = 0.9996012717 # scale factor on the central meridian\n\tlat0 = 49*pi/180 # Latitude of true origin (radians)\n\tlon0 = -2*pi/180 # Longtitude of true origin and central meridian (radians)\n\tN0, E0 = -100000, 400000 # Northing & easting of true origin (m)\n\tn = (a-b)/(a+b)\n\t\n\t# meridional radius of curvature\n\trho = a*F0*(1-e2)*(1-e2*sin(lat)**2)**(-1.5)\n\teta2 = nu*F0/rho-1\n\t\n\tM1 = (1 + n + (5/4)*n**2 
+ (5/4)*n**3) * (lat-lat0)\n\tM2 = (3*n + 3*n**2 + (21/8)*n**3) * sin(lat-lat0) * cos(lat+lat0)\n\tM3 = ((15/8)*n**2 + (15/8)*n**3) * sin(2*(lat-lat0)) * cos(2*(lat+lat0))\n\tM4 = (35/24)*n**3 * sin(3*(lat-lat0)) * cos(3*(lat+lat0))\n\t\n\t# meridional arc\n\tM = b * F0 * (M1 - M2 + M3 - M4)\n\t\n\tI = M + N0\n\tII = nu*F0*sin(lat)*cos(lat)/2\n\tIII = nu*F0*sin(lat)*cos(lat)**3*(5 - tan(lat)**2 + 9*eta2)/24\n\tIIIA = nu*F0*sin(lat)*cos(lat)**5*(61 - 58*tan(lat)**2 + tan(lat)**4)/720\n\tIV = nu*F0*cos(lat)\n\tV = nu*F0*cos(lat)**3*(nu/rho - tan(lat)**2)/6\n\tVI = nu*F0*cos(lat)**5*(5 - 18*tan(lat)**2 + tan(lat)**4 + 14*eta2 - 58*eta2*tan(lat)**2)/120\n\t\n\tN = I + II*(lon-lon0)**2 + III*(lon-lon0)**4 + IIIA*(lon-lon0)**6\n\tE = E0 + IV*(lon-lon0) + V*(lon-lon0)**3 + VI*(lon-lon0)**5\n\t\n\t# Job's a good'n.\n\treturn E, N", "def lon_lat_to_cartesian(lon, lat, R = 1):\n lon_r = np.radians(lon)\n lat_r = np.radians(lat)\n\n x = R * np.cos(lat_r) * np.cos(lon_r)\n y = R * np.cos(lat_r) * np.sin(lon_r)\n z = R * np.sin(lat_r)\n return x,y,z", "def test_galactic():\n center = coord.CelestialCoord(coord.Angle.from_hms('17:45:37.1991'),\n coord.Angle.from_dms('-28:56:10.2207'))\n north = coord.CelestialCoord(coord.Angle.from_hms('12:51:26.27549'),\n coord.Angle.from_dms('27:07:41.7043'))\n south = coord.CelestialCoord(coord.Angle.from_hms('00:51:26.27549'),\n coord.Angle.from_dms('-27:07:41.7043'))\n anticenter = coord.CelestialCoord(coord.Angle.from_hms('05:45:37.1991'),\n coord.Angle.from_dms('28:56:10.2207'))\n random = coord.CelestialCoord(0.234 * coord.radians, 0.342 * coord.radians)\n\n for c1 in [center, north, south, anticenter, random]:\n a1 = astropy.coordinates.SkyCoord(*c1.rad, unit=units.rad, frame='fk5')\n #print('c1.galactic() = ',c1.galactic())\n #print('a1.galactic = ',a1.galactic)\n el, b = c1.galactic()\n # Wrap el to the same phase as a1\n el = el.wrap(a1.galactic.l.rad * coord.radians)\n if c1 not in [north, south]:\n np.testing.assert_almost_equal(el.rad, a1.galactic.l.rad, decimal=6)\n np.testing.assert_almost_equal(b.rad, a1.galactic.b.rad, decimal=6)\n\n c2 = coord.CelestialCoord.from_galactic(el,b)\n a2 = astropy.coordinates.SkyCoord(el.rad, b.rad, unit=units.radian, frame='galactic')\n a2 = a2.transform_to('fk5')\n c2_ra = c2.ra.wrap(a2.ra.rad * coord.radians)\n np.testing.assert_almost_equal(c2_ra.rad, a2.ra.rad, decimal=6)\n np.testing.assert_almost_equal(c2.dec.rad, a2.dec.rad, decimal=6)", "def gon(self):\n return dec2gon(self.dec_angle)", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def gps_to_coords(self,lat,lon):\n\n if (lat <= self.min_lat or lat >= self.max_lat or lon <= self.min_lon or lon >= self.max_lon):\n return (-1,-1)\n\n lat_spot = int((self.max_lat-lat)/self.lat_step)\n lon_spot = int((lon-self.min_lon)/self.lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)", "def convert_to_cartesian(grid: List[Tuple[float, float]], radius: float = 1.0) -> List[Tuple[float, float, float]]:\n\n # conversion radians -> degrees\n r2d = 180.0 / np.pi\n\n # calculate x/y/z coordinates, assuming r=1\n return [\n (\n radius * np.cos(lat / r2d) * np.cos(lon / r2d),\n radius * np.cos(lat / r2d) * np.sin(lon / r2d),\n radius * np.sin(lat / r2d),\n )\n for lon, lat in grid\n ]", "def pixelToDegClean(self, x, y):\n lon = self.minLon + x * 
self.lonInc\n lat = self.pixelLat[self.res[1] - 1 - y]\n return lon,lat", "def gal2eq(self, long_in='l', lat_in='b', long_out='ra', lat_out='dec', name_prefix=\"__celestial_gal2eq\", radians=False, inplace=False):\n return self._trans(long_in, lat_in, long_out, lat_out, name_prefix=name_prefix, radians=radians, _matrix='gal2eq', inplace=inplace)", "def transform_coordinates(coords):\n # WGS 84 reference coordinate system parameters\n A = 6378.137 # major axis [km]\n E2 = 6.69437999014e-3 # eccentricity squared\n\n coords = prepare_coords(coords)\n\n # convert to radiants\n lat_rad = np.radians(coords[:, 0])\n lon_rad = np.radians(coords[:, 1])\n\n # convert to cartesian coordinates\n r_n = A / (np.sqrt(1 - E2 * (np.sin(lat_rad) ** 2)))\n x = r_n * np.cos(lat_rad) * np.cos(lon_rad)\n y = r_n * np.cos(lat_rad) * np.sin(lon_rad)\n z = r_n * (1 - E2) * np.sin(lat_rad)\n\n return np.column_stack((x, y, z))", "def s2_epsg_code(self):\n ul, lr = self.ul_lr\n epsg_old = self.epsg\n if epsg_old != 4326:\n lon, lat = ImageIO.transform_point(ul, epsg_old)\n else:\n lat, lon = ul\n lon_mod = int(lon / 6)\n\n lon_code = str(30 + lon_mod if lon < 0 else 31 - lon_mod).zfill(2)\n epsg = \"327\" if lat < 0 else \"326\"\n return int(epsg + lon_code)", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def convert(coords):\n lat = coords[:4]\n lon = coords[4:]\n\n lat = lat[:2] + \".\" + lat[2:]\n\n if int(lon[0]) > 5:\n lon = \"-\" + lon[:2] + \".\" + lon[2:]\n else:\n lon = \"-1\" + lon[:2] + \".\" + lon[2:]\n\n return (float(lat), float(lon))", "def lonlat2cr_for_geotif(path):\n old_cs, new_cs, gta, local_vars = _create_xform(path)\n transform = osr.CoordinateTransformation(new_cs, old_cs)\n\n def composite(lon, lat):\n \"\"\"xform from (lon, lat) to (c, r)\"\"\"\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return (~gta * transform.TransformPoint(lat, lon)[:2])\n \n return composite", "def grid2alg(grid_x=None, grid_y=None):\n return (\n chr(0x61 + grid_x) if grid_x is not None else '',\n chr(7 - grid_y + 0x31) if grid_y is not None else '',\n )", "def to_cartesian(self):\n w = 1.73205 # sqrt(3)\n h = 2\n dx = 0.5 * w if self.y % 2 == 1 else 0\n x = 0.5 * w + self.x * w + dx\n y = 0.5 * h + 0.75 * self.y * h\n return (x, y)", "def global_coords(self) -> GlobalCoordsABC:", "def hp2gon(hp):\n return dec2gon(hp2dec(hp))", "def gen_gps_to_coords(lat,lon,rows,cols,min_lat,max_lat,min_lon,max_lon):\n\n if (lat <= min_lat or lat >= max_lat or lon <= min_lon or lon >= max_lon):\n return (-1,-1)\n\n lat_step = abs(max_lat-min_lat)/rows\n lon_step = abs(max_lon-min_lon)/cols\n\n lat_spot = int((max_lat-lat)/lat_step)\n lon_spot = 
int((lon-min_lon)/lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)", "def merc(lat, lon):\n\tr_major = 6378137.000\n\tx = r_major * math.radians(lon)\n\tscale = x/lon\n\ty = 180.0/math.pi * math.log(math.tan(math.pi/4.0 + lat * (math.pi/180.0)/2.0)) * scale\n\treturn (x, y)", "def gon(self):\n return dec2gon(self.dec())", "def gon(self):\n return dec2gon(self.dec())", "def convert_coords(x, y, conversion):\n if conversion == \"cartesian\" :\n # convert to cartesian plane coordinates \n x_new = x - (width/2)\n y_new = (height/2) + y \n\n elif conversion == \"pygame\":\n # only needed to place images in pygame\n x_new = x + (width/2)\n y_new = (height/2) - y\n \n return x_new, y_new", "def vec2geo(r_hat):\n ra = degrees(atan2(r_hat[1], r_hat[0]))\n dec = degrees(atan2(r_hat[2], sqrt(r_hat[0] ** 2 + r_hat[1] ** 2)))\n return ra, dec", "def deg_to_compass(self, num):\n convert = int((num / 22.5) + .5)\n compass = [\"N\", \"NNE\", \"NE\", \"ENE\", \"E\", \"ESE\", \"SE\", \"SSE\",\n \"S\", \"SSW\", \"SW\", \"WSW\", \"W\", \"WNW\", \"NW\", \"NNW\"]\n return compass[(convert % 16)]", "def _getXYZ ( lon, lat ):\n d2r = pi / 180.\n rlon, rlat = ( d2r * lon, d2r * lat )\n x = cos(rlat) * cos(rlon)\n y = cos(rlat) * sin(rlon)\n z = sin(rlat)\n return (x,y,z)", "def distance_to_galactic_center(galactic_coords, d):\n l, b = galactic_coords[0] * 3600, galactic_coords[1] * 3600\n h_star_gcp, d_star_sun = d * np.sin(b), d * np.cos(b)\n d_star_gc = np.sqrt(d_star_sun**2 + d_sun_GC**2 - 2*d_star_sun*d_sun_GC*np.cos(l))\n return d_star_gc", "def get_angel(coordinates):\n x = coordinates[0]\n y = coordinates[1]\n\n if x == 0:\n if y < 0:\n return 0\n else:\n return math.pi\n\n if y == 0:\n if x < 0:\n return (3 * math.pi) / 2\n else:\n return math.pi / 2\n\n if x >= 0:\n if y >= 0:\n return ((math.pi / 2) + math.atan(abs(y)/abs(x)))\n else:\n return math.atan(abs(x)/abs(y))\n else:\n if y >= 0:\n return math.pi + math.atan(abs(x)/abs(y))\n else:\n return (3/2) * math.pi + math.atan(abs(y)/abs(x))", "def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian", "def galactic_to_MS():\n return MS_MATRIX", "def unproject(self, (x, y)):\n lng = x/EARTH_RADIUS * RAD_TO_DEG\n lat = 2 * math.atan(math.exp(y/EARTH_RADIUS)) - math.pi/2 * RAD_TO_DEG\n return (lng, lat)", "def calcLatLon(northing, easting):\n from math import asin, atan2, cos, log, pow, sin, sqrt\n\n # CONSUS Albers variables (EPSG: 5070)\n RE_NAD83 = 6378137.0\n E_NAD83 = 0.0818187034 # Eccentricity\n D2R = 0.01745329251 # Pi/180\n standardParallel1 = 43.\n standardParallel2 = 47.\n centralMeridian = -114.\n originLat = 30\n originLon = 0\n\n m1 = cos(standardParallel1 * D2R) / \\\n sqrt(1.0 - pow((E_NAD83 * sin(standardParallel1 * D2R)), 2.0))\n m2 = cos(standardParallel2 * D2R) / \\\n sqrt(1.0 - pow((E_NAD83 * sin(standardParallel2 * D2R)), 2.0))\n\n def calcPhi(i):\n sinPhi = sin(i * D2R)\n return (1.0 - pow(E_NAD83, 2.0)) * \\\n ((sinPhi/(1.0 - pow((E_NAD83 * sinPhi), 2.0))) -\n 1.0/(2.0 * E_NAD83) *\n log((1.0 - E_NAD83 * sinPhi)/(1.0 + E_NAD83 * sinPhi)))\n\n q0 = calcPhi(originLat)\n q1 = calcPhi(standardParallel1)\n q2 = calcPhi(standardParallel2)\n nc = (pow(m1, 2.0) - pow(m2, 2.0)) / (q2 - q1)\n C = pow(m1, 2.0) + nc * 
q1\n rho0 = RE_NAD83 * sqrt(C - nc * q0) / nc\n rho = sqrt(pow(easting, 2.0) + pow((rho0 - northing), 2.0))\n q = (C - pow((rho * nc / RE_NAD83), 2.0)) / nc\n beta = asin(q / (1.0 - log((1.0 - E_NAD83) / (1.0 + E_NAD83)) *\n (1.0 - pow(E_NAD83, 2.0))/(2.0 * E_NAD83)))\n a = 1.0 / 3.0 * pow(E_NAD83, 2.0) + 31.0 / 180.0 * \\\n pow(E_NAD83, 4.0) + 517.0 / 5040.0 * pow(E_NAD83, 6.0)\n b = 23.0/360.0 * pow(E_NAD83, 4.0) + 251.0 / 3780.0 * pow(E_NAD83, 6.0)\n c = 761.0/45360.0 * pow(E_NAD83, 6.0)\n theta = atan2(easting, (rho0 - northing))\n\n lat = (beta + a * sin(2.0 * beta) + b * sin(4.0 * beta) +\n c * sin(6.0 * beta))/D2R\n lon = centralMeridian + (theta / D2R) / nc\n coords = [lat, lon]\n\n return coords", "def ecliptic_latlon(self):\n vector = _ECLIPJ2000.dot(self.position.au)\n d, lat, lon = to_polar(vector)\n return (Angle(radians=lat, signed=True),\n Angle(radians=lon),\n Distance(au=d))", "def GalacticToPolar(Galactic,SolarPosition): \n\n Cartesian = GalacticToCartesian(Galactic,SolarPosition)\n Polar = CartesianToPolar(Cartesian)\n\n return Polar", "def _coord_to_EAN(coords):\n row, col = coords\n col = chr(col + ord('a'))\n row = str(8 - row)\n return col + row\n return col + row", "def lam2E(l):\n E=12398.4/(l*u['ang'])\n return E", "def gha(self):\n return np.mod(self.gmst*self.turndeg +\n self.turndeg*self.T*self.century +\n self.turndeg/2.0, self.turndeg)", "def ground_vec(Lat, Lon, eccentricity=None, major_axis=None):\n Lat, Lon = lat_lon_angle_check(Lat, Lon)\n are_two_arrays_equal(Lat,Lon)\n if (major_axis is None) or (eccentricity is None):\n major_axis, flattening = wgs84_param()\n eccentricity = (2*flattening) - (flattening**2)\n\n Lat, Lon = np.deg2rad(Lat.flatten()), np.deg2rad(Lon.flatten())\n Radius = np.divide(major_axis,\n np.sqrt(1.0 - eccentricity*np.sin(Lat)*np.sin(Lat)))\n Gx = np.array([Radius*np.cos(Lat)*np.cos(Lon),\n Radius*np.cos(Lat)*np.sin(Lon),\n Radius* (1 - eccentricity) * np.sin(Lat)])\n return Gx", "def __cartesian2spherical(x: float, y: float, z: float) -> Tuple[float, float]:\n if x == 0 and y == 0:\n return 0, np.degrees(np.pi * 0.5 * np.sign(z))\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n return np.degrees(lon), np.degrees(lat)", "def horizontal_to_cartesian(altitude, azimuth):\n theta = math.pi / 2 - math.radians(altitude)\n phi = math.radians(-azimuth)\n x = math.sin(phi) * math.sin(-theta)\n y = math.sin(theta) * math.cos(phi)\n z = math.cos(theta)\n return x, y, z", "def convert(coordinates):\n center = np.mean(coordinates, axis=0, dtype=np.float32)\n x = np.subtract(np.array(coordinates, dtype=np.float32), center)\n rho, phi = cart2pol(x[:, 0], x[:, 1])\n result = np.swapaxes(np.array([rho, phi], dtype=np.float32), 0, 1)\n\n # normalize rho values to range[0-1]\n result[:, 0] = normalize(result[:, 0].reshape(1, -1), norm='max')\n return result", "def geog(self) -> typing.Union[None, typing.Tuple[float, float]]:\n geog = self.data[4]\n geog = re.findall(r'(\\d{2})(\\d{2})(\\d{2}\\.\\d+)([NS]) (\\d{3})(\\d{2})(\\d{2}\\.\\d+)([EW])', geog)\n\n if geog:\n lat_deg, lat_min, lat_sec, lat_dir, long_deg, long_min, long_sec, long_dir = geog[0]\n\n lat = Point.parse_degrees(lat_deg, lat_min, lat_sec, direction=lat_dir)\n long = Point.parse_degrees(long_deg, long_min, long_sec, direction=long_dir)\n return lat, long\n return None", "def toECEFfromDegrees(latitude, longitude, altitude=0.0):\n return toECEF(math.radians(latitude), math.radians(longitude), altitude)", "def composite(c, r):\n x, y = gta * (c, r)\n lat, lon = 
transform.TransformPoint(x, y)[:2]\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return lon, lat", "def spherical_to_cartesian(r, lat, lon):\n import math\n\n if np.isscalar(r) and np.isscalar(lat) and np.isscalar(lon):\n x = r * math.cos(lat) * math.cos(lon)\n y = r * math.cos(lat) * math.sin(lon)\n z = r * math.sin(lat)\n else:\n x = r * np.cos(lat) * np.cos(lon)\n y = r * np.cos(lat) * np.sin(lon)\n z = r * np.sin(lat)\n\n return x, y, z", "def gon2rad(gon):\n return radians(gon2dec(gon))", "def convert_unitcell_to_abc(self):\n a = np.sqrt(np.dot(self.lattice[0], self.lattice[0]))\n b = np.sqrt(np.dot(self.lattice[1], self.lattice[1]))\n c = np.sqrt(np.dot(self.lattice[2], self.lattice[2]))\n gamma = np.arccos(np.dot(self.lattice[0], self.lattice[1]) / (a*b))\n beta = np.arccos(np.dot(self.lattice[0], self.lattice[2]) / (a*c))\n alpha = np.arccos(np.dot(self.lattice[1], self.lattice[2]) / (b*c))\n return a, b, c, np.degrees(alpha), np.degrees(beta), np.degrees(gamma)", "def gpgga_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[6] == '0' :\r\n return\r\n fix = ''\r\n if gps[6] == '1':\r\n fix = 'GPS fix'\r\n elif gps[6] == '2':\r\n fix = 'DGPS fix'\r\n elif gps[6] == '4':\r\n fix = 'RTK Fix coordinate (centimeter precision)'\r\n elif gps[6] == '5':\r\n fix = 'RTK Float (decimeter precision)'\r\n #utc = gps[1][0:2] + ':' + gps[1][2:4] + ':' + gps[1][4:6]\r\n lat = ddm_dd_convert(gps[2], gps[3])\r\n long = ddm_dd_convert(gps[4], gps[5]) \r\n return [lat, long, fix]", "def convertLatLon(latCell, lonCell):\n cell_lats = np.array([])\n cell_lons = np.array([])\n for lat in latCell:\n cell_lats = np.append(cell_lats, lat * (180 / np.pi)) \n for lon in lonCell:\n cell_lons = np.append(cell_lons, lon * (180 / np.pi)) \n\n return cell_lats, cell_lons", "def cartesian2Geo(julian_date, x, y, z):\n\n\n # Calculate LLA\n lat, r_LST, ele = ecef2LatLonAlt(x, y, z)\n\n # Calculate proper longitude from the given JD\n lon, _ = LST2LongitudeEast(julian_date, np.degrees(r_LST))\n\n # Convert longitude to radians\n lon = np.radians(lon)\n\n\n return np.degrees(lat), np.degrees(lon), ele", "def calc_GMST(self, date):\n jd = self.julian_date(date)\n T = (jd - 2451545.0)/36525.0\n gmstdeg = 280.46061837+(360.98564736629*(jd-2451545.0))+(0.000387933*T*T)-(T*T*T/38710000.0)\n gmst = ephem.degrees(gmstdeg*np.pi/180.0)\n return gmst", "def cr2lonlat_for_geotif(path):\n old_cs, new_cs, gta, local_vars = _create_xform(path)\n transform = osr.CoordinateTransformation(old_cs, new_cs)\n\n def composite(c, r):\n \"\"\"xform from (c, r) to (lon, lat)\"\"\"\n x, y = gta * (c, r)\n lat, lon = transform.TransformPoint(x, y)[:2]\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return lon, lat\n \n return composite", "def __convert(args):\n a, b, zone, ellipsoid, datum, inverse = args\n projection = Proj(\"+proj=utm +zone={}, +ellps={} +datum={} +units=m +no_defs\".format(zone, ellipsoid, datum))\n c, d = projection(a, b, inverse=inverse)\n\n return c, d" ]
[ "0.7226109", "0.69753695", "0.62914294", "0.6262002", "0.6209722", "0.60955137", "0.60862833", "0.59612167", "0.59590286", "0.5948453", "0.5936651", "0.59322864", "0.58702904", "0.58493227", "0.5813463", "0.57856524", "0.5758909", "0.57558006", "0.5741679", "0.5709453", "0.56790584", "0.56390375", "0.5615913", "0.5612044", "0.5583041", "0.5567526", "0.5536925", "0.55257577", "0.5511409", "0.5498759", "0.54973835", "0.5493619", "0.54929197", "0.5485832", "0.5484495", "0.5481017", "0.547333", "0.54334617", "0.543293", "0.5432013", "0.5421756", "0.5417644", "0.5407036", "0.54006225", "0.53786385", "0.53771037", "0.53739905", "0.5370051", "0.5367035", "0.5354444", "0.53437304", "0.53387713", "0.5335187", "0.5334274", "0.532139", "0.5316734", "0.5295904", "0.5290538", "0.52616596", "0.5258393", "0.52482563", "0.5246001", "0.52371496", "0.5205804", "0.5203156", "0.5201535", "0.5195716", "0.517718", "0.517718", "0.517341", "0.51722795", "0.5170701", "0.5165278", "0.5159242", "0.51474214", "0.5140396", "0.51398396", "0.5137905", "0.5136914", "0.513617", "0.51335806", "0.5132168", "0.51269567", "0.5126687", "0.5125426", "0.51222754", "0.5093112", "0.50928605", "0.5085925", "0.5085735", "0.5083672", "0.50836265", "0.5077435", "0.5077158", "0.50754696", "0.50740206", "0.50724083", "0.5067712", "0.50483257", "0.5044683" ]
0.67290187
2
Compute inverse (using extended Euclidean algorithm)
def compute_inverse(in1, in2):
    aL = [in1]
    bL = [in2]
    tL = [0]
    t = 1
    sL = [1]
    s = 0
    q = math.floor((aL[0] / bL[0]))
    r = (aL[0] - (q * bL[0]))
    while r > 0:
        temp = (tL[0] - (q * bL[0]))
        tL[0] = t
        t = temp
        temp = (sL[0] - (q * s))
        sL[0] = s
        s = temp
        aL[0] = bL[0]
        bL[0] = r
        q = math.floor(aL[0] / bL[0])
        r = (aL[0] - (q * bL[0]))
    inverse = s % in2
    return inverse
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self, x, y):", "def erfcinv(a):", "def modular_inverse(e, z):\n g, x, y = extended_euclidean_algorithm(e, z)\n if g != 1: raise Exception('Modular inverse does not exist')\n else: return x % z", "def inverse_in_zn(g, n):\n assert(n >= g)\n x, y, d = extended_euclidean(n, g)\n if y < 0:\n return y + n\n return y", "def extended_euclidean(self):\n self.a = gmpy2.invert(self.e1, self.e2)\n self.b = (float(self.gcd(self.e1, self.e2)-(self.a*self.e1)))/float(self.e2)", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def _inverse(self, y):\n d = self._compute_shared(y=y)\n rely = y - d.y_k # tf.where(d.out_of_bounds, tf.zeros_like(y), y - d.y_k)\n term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)\n # These terms are the a, b, c terms of the quadratic formula.\n a = d.h_k * (d.s_k - d.d_k) + term2\n b = d.h_k * d.d_k - term2\n c = -d.s_k * rely\n # The expression used here has better numerical behavior for small 4*a*c.\n relx = tf.where(\n tf.equal(rely, 0), tf.zeros_like(a),\n (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))\n return relx * d.w_k + d.x_k #tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)", "def complex_inverse(c1,cr):", "def multiplicative_inverse(e, n):\n x, y = extended_gcd(e, n)\n if x < 0:\n return n + x\n return x", "def inverse(self):\n n = self.norm()\n c = self.conj()\n d = 1.0 / (n * n)\n c.scale(d)\n return c", "def inverse_of(n, p):\n gcd, x, y = extended_euclidean_algorithm(n, p)\n assert (n * x + p * y) % p == gcd\n\n if gcd != 1:\n # Either n is 0, or p is not a prime number.\n raise ValueError(\n '{} has no multiplicative inverse '\n 'modulo {}'.format(n, p))\n else:\n return x % p", "def invert(x):\n return linalg.inv(x)", "def inv(self):\n\t\tdeterminant = self.det()\n\t\tif determinant:\n\t\t\treturn self.adj() / determinant\n\t\telse:\n\t\t\traise ValueError(\"Not Invertible\")", "def inverse(self):\n return self.solve(Matrix.I(self.nrows))", "def invert(self,el):\n return el^(self.q-2)", "def inv(a,b,c,d):\n\tdet = a*d-b*c\n\tm = lambda x: fractions.Fraction(x, det)\n\treturn map(str, map(m, [d, -b, -c, a]))", "def inv(self, y):\n pass", "def __computeSinglePointInverse2DCT(self, imge, x, y, N):\n result = 0\n\n for u in xrange(N):\n for v in xrange(N):\n if (u==0) and (v==0):\n tau = 1.0/N\n elif (u==0) or (v==0):\n tau = math.sqrt(2.0)/N\n else:\n tau = 2.0/N \n result += tau * imge[u, v] * math.cos(((2*x + 1)*u*math.pi)/(2*N)) * math.cos(((2*y + 1)*v*math.pi)/(2*N))\n\n return result", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse", "def inv_heaviside(n, axis=0, normalized=True):\n w = jnp.sqrt(jnp.arange(n, 0, -1))\n\n times_u = lambda x: jnp.diff(x, prepend=0)\n trans_u = lambda x: -jnp.diff(x, append=0)\n\n times_n = lambda x: jnp.diff(x, prepend=0) * w\n trans_n = lambda x: -jnp.diff(x * w, append=0)\n\n times, trans = (times_n, trans_n) if normalized else (times_u, trans_u) \n times, trans = apply_along_axis(times, trans, axis)\n return Operator(times=times, trans=trans, shape=(n, n))", "def mod_inverse(x, m):\n inv, _ = extended_gcd(x, m)\n return inv", "def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. 
/ t.sqrt(self.eig).to(device), self.rot.to(device))", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def modular_inverse(a, m):\n\n def extended_gcd(_a, _b):\n \"\"\" Use the Extended Euclidean algorithm to calculate the \"extended greatest common divisor\".\n It takes as input two positive integers a and b, then calculates the following:\n 1. The greatest common divisor (gcd) between a and b -- that is, the integer number g which is the largest\n integer for which a/g and b/g both are integers (This can also be obtained using math.gcd)\n 2. The integer x and y so that a*x + b*y = gcd(x, y)\n :param _a: Positive integer\n :param _b: Positive integer\n :return: Tuple (gcd, x, y)\n \"\"\"\n previous_remainder, remainder = _a, _b\n current_x, previous_x, current_y, previous_y = 0, 1, 1, 0\n while remainder > 0:\n previous_remainder, (quotient, remainder) = remainder, divmod(\n previous_remainder, remainder)\n current_x, previous_x = previous_x - quotient * current_x, current_x\n current_y, previous_y = previous_y - quotient * current_y, current_y\n # The loop terminates with remainder == 0, x == b and y == -a. This is not what we want, and is because we have\n # walked it through one time \"too many\". Therefore, return the values\n # of the previous round:\n return previous_remainder, previous_x, previous_y\n\n gcd_value, x, y = extended_gcd(a, m)\n if gcd_value != 1:\n return False\n # print('No inverse. gcd (%d, %d) is %d. Decoding is not unique. 
Choose another key than %d'\n # % (a, m, math.gcd(a, m), a))\n return x % m", "def inv(z: int) -> int:\n # Adapted from curve25519_athlon.c in djb's Curve25519.\n z2 = z * z % q # 2\n z9 = pow2(z2, 2) * z % q # 9\n z11 = z9 * z2 % q # 11\n z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0\n z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0\n z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ...\n z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q\n z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q\n z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q\n z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q\n z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0\n return pow2(z2_250_0, 5) * z11 % q # 2^255 - 2^5 + 11 = q - 2", "def extended_euclidean_algorithm(a, b):\n if a == 0: return b, 0, 1\n else:\n g, y, x = extended_euclidean_algorithm(b % a, a)\n return g, x - (b // a) * y, y", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = self.B@self.B.T\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( I_BBt_inv@self.B/self.alpha))", "def test_inverse_c(self):\n for q in self.all:\n self.assertTrue((q * q.inverse()).almost_equal(q.inverse()*q))", "def inverse(num, r):\n if int(num) == 0:\n return 0\n else:\n # Multiply with every number in the field and\n # check if the result is one. Easy Peasy!\n # Todo: Use Extended Euclidean Algo\n # or Logs/Anti-Logs\n for i in range(1, 256):\n if _multiply(num, i, r) == 1:\n return i", "def _r_inv(self):\n raise NotImplementedError", "def multiplicative_inverse(e, phi):\n\t\n\td, x1, x2, y1 = 0, 0, 1, 1\n\toriginal_phi = phi\n\t\n\twhile e > 0:\n\t\ttemp1 = phi // e\n\t\tphi, e = e, phi % e\n\t\tx2, x1 = x1, (x2 - temp1 * x1)\n\t\td, y1 = y1, (d - temp1 * y1)\n \n\treturn d % original_phi", "def get_inverse_2x2(u, v):\n if not is_linearly_independent_2x2(u, v):\n return\n uv = get_uv(u, v)\n iden = get_uv([1, 0],[0, 1])\n a = np.zeros((2, 4))\n for i in range(2):\n for j in range(2):\n a[i][j] = uv[i][j]\n a[i][j+2] = iden[i][j]\n\n q = a[0][1] / a[1][1]\n a[0] = a[0] - q * a[1]\n\n q = a[1][0] / a[0][0]\n a[1] = a[1] - q * a[0]\n\n a[0] /= a[0][0]\n\n a[1] /= a[1][1]\n\n for i in range(2):\n for j in range(2):\n uv[i][j] = a[i][j+2]\n return uv", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def inverse(self, x):\n x = np.asarray(x)\n def r(vec):\n return utils.recycled(vec, as_=x)\n if self.zero is not None and self.multiplier is not None:\n x = x / r(self.multiplier) + r(self.zero)\n elif self.zero is not None:\n x = x + r(self.zero)\n elif self.multiplier is not None:\n x = x / r(self.multiplier)\n return x", "def mod_inverse_iterative(a, b):\n x, y, u, v = 0, 1, 1, 0\n while a != 0:\n q = int(b / a)\n r = b % a\n m = x - u * q\n n = y - v * q\n b, a, x, y, u, v = a, r, u, v, m, n\n return b, x, y", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise 
ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);", "def inv(self):\n return MoebGen(self._d / self._det, - self._b / self._det, - self._c / self._det, self._a / self._det)", "def inverse_gc(g):\n i = g\n j = 1\n while j<N:\n i = i ^ (g >> j)\n j = j + 1\n return i", "def inverse(self):\n cdef StdVectorFst result = self.copy()\n result.invert()\n return result", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def arcsinh_inplace(a):", "def inverse_transform(v):\n v, k = divmod(v - 1, N)\n v, j = divmod(v, N)\n v, i = divmod(v, N)\n return i, j, k", "def invMod(a, b):\n (x, y, gcd) = extEuclid(a, b)\n if gcd == 1:\n if x < 0:\n x += b\n return x\n else:\n raise Exception('%d has no inverse mod %d' % (a, b))", "def mod_inverse(a, n):\n \n b = n\n if abs(b) == 0:\n return (1, 0, a)\n\n x1, x2, y1, y2 = 0, 1, 1, 0\n while abs(b) > 0:\n q, r = divmod(a, b)\n x = x2 - q * x1\n y = y2 - q * y1\n a, b, x2, x1, y2, y1 = b, r, x1, x, y1, y\n\n return x2 % n", "def inverseN(self):\r\n result = Matrix(self.rows, self.columns)\r\n for r in range(self.rows):\r\n for c in range(self.columns):\r\n result.mat[r][c] = self.cofactor(r, c)\r\n result.out()\r\n result = result.transpose()\r\n det = self.determinant()\r\n print(\"1/(\" + str(det) + \")\")\r\n result.out()\r\n return result", "def inv(in_A):\n Q,R = qr(in_A)\n QT = Q.T\n N = shape(in_A)[0]\n \n for n in range(N-1,-1,-1):\n Rnn = R[n,n]\n R[n,:] /= Rnn\n QT[n,:] /= Rnn\n for m in range(n+1,N):\n Rnm = R[n,m]\n R[n,m] = 0\n QT[n,:] -= QT[m,:]*Rnm\n\n return QT", "def mod_inverse(a, n):\n b = n\n if abs(b) == 0:\n return (1, 0, a)\n\n x1, x2, y1, y2 = 0, 1, 1, 0\n while abs(b) > 0:\n q, r = divmod(a, b)\n x = x2 - q * x1\n y = y2 - q * y1\n a, b, x2, x1, y2, y1 = b, r, x1, x, y1, y\n\n return x2 % n", "def _inverse(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. 
/ (alpha + r)\n beta_h = beta * h\n return x + beta_h * diff", "def inverse(self):\n invr = np.linalg.inv(self.affine_matrix)\n return SymmOp(invr)", "def inverse( m, context = FloatContext, copy_m=True ):\n n,n_ = shape_mat(m)\n assert (n==n_) #matris should be square\n\n return solve( m, eye(n), context=context, copy_b=False, copy_a=copy_m )", "def get_fc_inv(fc):\n return scipy.linalg.pinvh(fc.T @ fc) @ fc.T", "def inverse(im): \t \n x,y = np.shape(im)\n img = np.zeros([x,y])\n\t\n for i in range(x):\n for j in range(y):\n img[i,j] = 255 - im[i,j]\n return img", "def get_inverse(a):\n if len(a) == len(a[0]):\n i = get_identity(len(a))\n inverse = gaussian_solve(a, i)\n return inverse", "def invupdatered(A, c):\n n, m = A.shape\n indn = np.arange(n)\n q = A[c, c]\n c1 = np.hstack((indn[:c], indn[c+1:]))\n Ax = np.atleast_2d(A[c1, c])\n yA = np.atleast_2d(A[c, c1])\n return A[c1][:,c1] - np.dot(Ax.T, yA)/q", "def inv_inplace(a):", "def inverse(self):\n # find the determinant of the matrix\n determinant = self.determinant()\n # find the matrix of minors of the matrix\n matrix_of_minors = self.matrix_of_minors()\n # find the cofactor of the matrix of minors\n cofactor_matrix = self.cofactor_matrix(matrix_of_minors)\n # find the transpose of the cofactor matrix\n transpose_cofactor_matrix = self.transpose(cofactor_matrix)\n # find the adjugate (inverse) matrix\n inverse_matrix = self.adjugate_matrix(determinant, transpose_cofactor_matrix)\n\n return inverse_matrix", "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def extended_euclidean_algorithm(a, b):\n s, old_s = 0, 1\n t, old_t = 1, 0\n r, old_r = b, a\n\n while r != 0:\n quotient = old_r // r\n old_r, r = r, old_r - quotient * r\n old_s, s = s, old_s - quotient * s\n old_t, t = t, old_t - quotient * t\n\n return old_r, old_s, old_t", "def diffeo_inverse(a):\n M, N = a.shape[0], a.shape[1]\n result = np.empty_like(a)\n result.fill(-1) # fill invalid data\n many = np.zeros((M, N))\n many.fill(0)\n for i, j in coords_iterate((M, N)):\n i1 = a[i, j, 0]\n j1 = a[i, j, 1]\n result[i1, j1, 0] = i\n result[i1, j1, 1] = j\n many[i1, j1] += 1\n\n num = (many == 0).sum()\n if num:\n fill_invalid(result[:, :, 0], -1)\n fill_invalid(result[:, :, 1], -1)\n\n return result", "def numerical_inverse(self, *args, **kwargs):\n tolerance = kwargs.get('tolerance', 1e-5)\n maxiter = kwargs.get('maxiter', 50)\n adaptive = kwargs.get('adaptive', True)\n detect_divergence = kwargs.get('detect_divergence', True)\n quiet = kwargs.get('quiet', True)\n with_bounding_box = kwargs.get('with_bounding_box', True)\n fill_value = kwargs.get('fill_value', np.nan)\n with_units = kwargs.pop('with_units', False)\n\n if not utils.isnumerical(args[0]):\n args = self.output_frame.coordinate_to_quantity(*args)\n if self.output_frame.naxes == 1:\n args = [args]\n args = utils.get_values(self.output_frame.unit, *args)\n\n args_shape = np.shape(args)\n nargs = args_shape[0]\n arg_dim = len(args_shape) - 1\n\n if nargs != self.world_n_dim:\n raise ValueError(\"Number of input coordinates is different from \"\n \"the number of defined world coordinates in the \"\n f\"WCS ({self.world_n_dim:d})\")\n\n if self.world_n_dim != self.pixel_n_dim:\n raise NotImplementedError(\n \"Support for iterative inverse for transformations with \"\n \"different number of inputs and outputs was not implemented.\"\n )\n\n # initial guess:\n if nargs == 2 and self._approx_inverse is None:\n self._calc_approx_inv(max_inv_pix_error=5, 
inv_degree=None)\n\n if self._approx_inverse is None:\n if self.bounding_box is None:\n x0 = np.ones(self.pixel_n_dim)\n else:\n x0 = np.mean(self.bounding_box, axis=-1)\n\n if arg_dim == 0:\n argsi = args\n\n if nargs == 2 and self._approx_inverse is not None:\n x0 = self._approx_inverse(*argsi)\n if not np.all(np.isfinite(x0)):\n return [np.array(np.nan) for _ in range(nargs)]\n\n result = tuple(self._vectorized_fixed_point(\n x0, argsi,\n tolerance=tolerance,\n maxiter=maxiter,\n adaptive=adaptive,\n detect_divergence=detect_divergence,\n quiet=quiet,\n with_bounding_box=with_bounding_box,\n fill_value=fill_value\n ).T.ravel().tolist())\n\n else:\n arg_shape = args_shape[1:]\n nelem = np.prod(arg_shape)\n\n args = np.reshape(args, (nargs, nelem))\n\n if self._approx_inverse is None:\n x0 = np.full((nelem, nargs), x0)\n else:\n x0 = np.array(self._approx_inverse(*args)).T\n\n result = self._vectorized_fixed_point(\n x0, args.T,\n tolerance=tolerance,\n maxiter=maxiter,\n adaptive=adaptive,\n detect_divergence=detect_divergence,\n quiet=quiet,\n with_bounding_box=with_bounding_box,\n fill_value=fill_value\n ).T\n\n result = tuple(np.reshape(result, args_shape))\n\n if with_units and self.input_frame:\n if self.input_frame.naxes == 1:\n return self.input_frame.coordinates(result)\n else:\n return self.input_frame.coordinates(*result)\n else:\n return result", "def __invert__(self):\n return self.inverse()", "def inv(q):\n return q * np.array([-1,-1,-1,1]) / amplitude(q) ** 2", "def inv(self):\n return self.conjugate()", "def inverse_e(self, e):\n return (e - self.e_min) / self.e_range", "def inverse(self):\n ss = self._sum_of_squares()\n if ss > 0:\n return self.__class__(array=(self._vector_conjugate() / ss))\n else:\n raise ZeroDivisionError(\"a zero quaternion (0 + 0i + 0j + 0k) cannot be inverted\")", "def _mult_inverse(self, a, m):\n g, x, y = self._egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m", "def find_inverse(a,n):\n if a==0 or gcd(a,n)!=1: return \".\"\n if a==1: return 1\n k=1\n while True:\n a_inv = (1+n*k)/a \n if a_inv.is_integer():\n return int(a_inv)\n else:\n k=k+1", "def multiplicative_inverse(a, b):\n # r = gcd(a,b) i = multiplicitive inverse of a mod b\n # or j = multiplicitive inverse of b mod a\n # Neg return values for i or j are made positive mod b or a respectively\n # Iterateive Version is faster and uses much less stack space\n x = 0\n y = 1\n lx = 1\n ly = 0\n oa = a # Remember original a/b to remove\n ob = b # negative values from return results\n while b != 0:\n q = a // b\n (a, b) = (b, a % b)\n (x, lx) = ((lx - (q * x)), x)\n (y, ly) = ((ly - (q * y)), y)\n if lx < 0:\n lx += ob # If neg wrap modulo orignal b\n if ly < 0:\n ly += oa # If neg wrap modulo orignal a\n # return a , lx, ly # Return only positive values\n return lx", "def euclidean(x, y):\n ed = np.sqrt(np.sum((x-y)**2))\n # print ed\n return ed", "def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return self.__class__(self._diag.reciprocal())", "def inv(X):\n R, t = Rt(X)\n Ri = R.T\n return np.concatenate((\n np.concatenate((Ri, -Ri.dot(t)[:,np.newaxis]), axis=1),\n np.array([[0, 0, 1]])))", "def inv(a):\n a, cv, isMatrix = get_computation_matrix(a)\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n if a.numRows() != a.numCols():\n raise ValueError(\"inv: input a is not a square matrix!\")\n #compute LU using getrf\n (lu, piv, _) = getrf(a, overwrite_a=1, dtype=t_dtype)\n (ainv, _) = 
getri(lu, piv, lwork=0, overwrite_lu=1, dtype=t_dtype)\n if cv:\n if isMatrix:\n return ainv.to_numpy_matrix()\n else:\n return ainv.to_numpy_array()\n else:\n return ainv", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def mod_inverse(base, m):\n g, x, y = mod_inverse_iterative(base, m)\n if g != 1:\n return None\n else:\n return x % m", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def inverse(self):\n q_vector = np.zeros(4)\n q_vector[:3] = self.imaginary*-1\n q_vector[3] = self.w\n return Quat(q_vector,\"xyzw\")", "def __truediv__(self, o): \n return self * o.inv()", "def haar_inv(c, normalize=False):\n return haar_inv_step(c, -1, normalize=normalize)", "def inverse(self):\n return self._inverse", "def mod_inverse(base, m):\n\n g, x, y = mod_inverse_iterative(base, m)\n if (g != 1):\n return None\n else:\n return (x % m)", "def __invert__(self):\n \n return Vector(-self.y, self.x)", "def inverseMod(a,b):\n if GMPY:\n return int(gmpy2.invert(a,b))\n else:\n gcd, x, y = computeGCD(a, m)\n if gcd != 1:\n None # there is no inverse of a mod b\n else:\n return x % m", "def arcsinh(a):", "def inverse_mod( a, m ):\r\n\r\n if a < 0 or m <= a: a = a % m\r\n\r\n # From Ferguson and Schneier, roughly:\r\n\r\n c, d = a, m\r\n uc, vc, ud, vd = 1, 0, 0, 1\r\n while c != 0:\r\n q, c, d = divmod( d, c ) + ( c, )\r\n uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc\r\n\r\n # At this point, d is the GCD, and ud*a+vd*m = d.\r\n # If d == 1, this means that ud is a inverse.\r\n\r\n assert d == 1\r\n if ud > 0: return ud\r\n else: return ud + m", "def computeInverse2DDCT(self, imge):\n \n # Assuming a square image\n N = imge.shape[0]\n finalInverse2DDCT = np.zeros([N, N], dtype=float)\n for x in xrange(N):\n for y in xrange(N):\n #Compute the DCT value for each cells/points in the resulting transformed image.\n finalInverse2DDCT[x, y] = DCT.__computeSinglePointInverse2DCT(imge, x, y, N)\n return finalInverse2DDCT", "def inverse_mod(a,n):\n\t(g,xa,xb) = xgcd(a,n)\n\tif(g != 1): raise ValueError(\"***** Error *****: {0} has no inverse (mod {1}) as their gcd is {2}, not 1.\".format(a,n,g))\n\treturn xa % n", "def inverse(self):\n # TODO\n # detA\n if not self.is_square():\n raise(\n ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(\n NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n mD = self.determinant()\n if self.h == 1:\n if self.g[0][0] = 0:\n raise(NotImplementedError,\n \"The 1x1 Matrix contains 0 can't inverse\")\n else:\n return [[1 / self.g[0][0]]] \n for i in range(self.h): # Calculates the inverse of a 2x2 Matrix.\n my_Matrix = zeroes(2, 2)\n my_Matrix.g[1][1] = self.g[0][0] / mD\n my_Matrix.g[0][0] = self.g[1][1] / mD\n my_Matrix.g[0][1] = - self.g[0][1] / mD\n my_Matrix.g[1][0] = - self.g[1][0] / mD\n return my_Matrix\n\n # trace A\n # 与矩阵TraceA * I identity 单位矩阵", "def inverse_mod(a,n):\r\n\t(g,xa,xb) = xgcd(a,n)\r\n\tif(g != 1): raise ValueError(\"***** Error *****: {0} has no inverse (mod {1}) as their gcd is {2}, not 1.\".format(a,n,g))\r\n\treturn xa % n", "def inverse(self) -> 'Invertible':\n raise NotImplementedError", "def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If 
A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I", "def modinv(a, m):\n g, x, y = egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m", "def inverse(self, u: Tensor, covariates: Tensor) -> Tensor:\n return self.real_nvp.inverse(u, covariates)", "def test_inverse_transform(self):", "def opposite(x):\n return -1*x", "def inv_img(img):\n return np.abs(img - 1.)", "def euclidean(x,y): \n\treturn np.sqrt(np.sum((x-y)**2))", "def inverse_el(el: Fp, p: int) -> Fp:\n return pow(int(el), p-2, p)", "def get_euclidean_vector(vector):\n\n return np.subtract(vector[1], vector[0])" ]
[ "0.7188857", "0.71758795", "0.70527136", "0.70293945", "0.68511194", "0.6820181", "0.6751133", "0.67222536", "0.6711877", "0.6700519", "0.6607881", "0.6585525", "0.65800756", "0.65740085", "0.65705127", "0.6567452", "0.6539267", "0.6527102", "0.64968294", "0.64808273", "0.647806", "0.64767593", "0.6469049", "0.6453477", "0.64503664", "0.6403538", "0.63966626", "0.6363985", "0.63608575", "0.63445044", "0.63362676", "0.63328326", "0.63171977", "0.63073456", "0.6300332", "0.6295862", "0.62860936", "0.6285149", "0.6275853", "0.6271617", "0.6266559", "0.62622714", "0.62557745", "0.6251742", "0.62488294", "0.6246919", "0.6238358", "0.6238262", "0.6237861", "0.6223394", "0.62229294", "0.6217955", "0.620796", "0.6188394", "0.61826247", "0.6179428", "0.61756814", "0.6173161", "0.6169081", "0.6168069", "0.6163925", "0.6152983", "0.6133686", "0.6127479", "0.60903776", "0.6086872", "0.60787916", "0.6077304", "0.6059596", "0.6056468", "0.6050918", "0.60417175", "0.6032256", "0.6028235", "0.60264045", "0.60150677", "0.60089076", "0.5992183", "0.59880304", "0.59862524", "0.5983616", "0.5978363", "0.59755164", "0.59731513", "0.596085", "0.59572333", "0.5956516", "0.59395546", "0.593782", "0.59304494", "0.5916989", "0.59166", "0.5913682", "0.5907004", "0.5896314", "0.58959115", "0.5892585", "0.58923155", "0.5891712", "0.5891089" ]
0.70715195
2
Create a hash from a document using the SHA-1 algorithm.
def sha_hash(file_name: str):
    BLOCKSIZE = 65536
    line = ''  # format one line for hash
    with open(file_name, 'rb') as afile:
        buf = afile.read(BLOCKSIZE)
        # read each line of doc
        while len(buf) > 0:
            line += buf.decode('utf-8')
            buf = afile.read(BLOCKSIZE)
    hex = "0x" + sha1(line.encode())  # create sha1 hash
    return int(hex, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sha1hex(doc):\n doc_id = doc.pop('_id',None)\n doc_rev = doc.get('_rev',None)\n doc_string = str(doc)\n\n if doc_id is not None:\n doc['_id'] = doc_id\n\n if doc_rev is not None:\n doc['_rev'] = doc_rev\n\n return hashlib.sha1(doc_string).hexdigest().upper()", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def _sha1(self):\n return hashlib.sha1(self._blob).hexdigest()", "def _sha1_hash_json(self, value):\n hash = hashlib.new(\"sha1\")\n binary_value = value.encode(\"ascii\")\n hash.update(binary_value)\n sha1_res = hash.hexdigest()\n return sha1_res", "def sha1(self) -> str:\n return self.data.sha1", "def hex_sha1_of_bytes(data: bytes) -> Sha1HexDigest:\n return Sha1HexDigest(hashlib.sha1(data).hexdigest())", "def _calc_sha1(path):\n calc = hashlib.sha1()\n with open(path, 'r') as f:\n calc.update(f.read())\n return calc.hexdigest()", "def get_content_sha1(self):", "def sha1(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)\n d.update(data)\n return d.digest()", "def get_hash(content):\n return hashlib.sha1(content).hexdigest()", "def sha1(self):\n return self.tag(\"sha1\")", "def object_sha1(obj):\n\n return hashlib.sha1(json.dumps(obj).encode()).hexdigest()", "def instance(data):\n return Fieldsha1(data)", "def sha1(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha1\")", "def add_sha1(sender, form, **kwargs):\n if not const.TAG_SHA1 in form.all_properties():\n form[\"#sha1\"] = form.xml_sha1()\n form.save()\n else:\n current_sha = form.all_properties().get(const.TAG_SHA1, \"\")\n calculated_sha = form.xml_sha1()\n if current_sha != calculated_sha:\n logging.error(\"bad sha-1 calculation for form %s, was %s but expected %s... overriding\" % \\\n (form.get_id, current_sha, calculated_sha))\n form[\"#sha1\"] = calculated_sha\n form.save()", "def _get_hash(self, query):\n return hashlib.sha1(str(query)).hexdigest()", "def sha1(self, s):\n\t\tself.sha1_calls += 1\n\t\treturn int(hashlib.sha1(s).hexdigest(), 16)", "def hex_sha1_of_stream(input_stream: ReadOnlyStream, content_length: int) -> Sha1HexDigest:\n return Sha1HexDigest(\n update_digest_from_stream(\n hashlib.sha1(),\n input_stream,\n content_length,\n ).hexdigest()\n )", "def sha1(s: str) -> str:\n return hashlib.sha1(s.encode()).hexdigest()", "def hash(self) -> bytes:", "def _get_pubickey_sha1_hash(cert):\n pkey = cert.get_pubkey()\n pkey_asn1 = dump_publickey(FILETYPE_ASN1, pkey)\n decoded_pkey, _ = der_decoder.decode(\n pkey_asn1, rfc2459.SubjectPublicKeyInfo())\n pubkey = bit_string_to_bytearray(decoded_pkey['subjectPublicKey'])\n # algorithm = decoded_pkey['algorithm'] # RSA encryption\n sha1_hash = hashlib.sha1()\n sha1_hash.update(pubkey)\n return sha1_hash", "def hash_file_sha1(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.sha1, binary=binary, buffer_size=buffer_size)", "def get_hash(file_buffer):\n data = file_buffer.read()\n hasher = sha1()\n hasher.update(data)\n return hasher.hexdigest()", "def sha1HashFile(self, filename: Path):\n bufferSize = 65536\n sha1Hash = hashlib.sha1()\n\n with filename.open('rb') as f:\n while True:\n data = f.read(bufferSize)\n\n if not data:\n break\n\n sha1Hash.update(data)\n\n return str(sha1Hash.hexdigest())", "def calculate_hash(stuff):\n\tsha1 = hashlib.sha1()\n\tsha1.update(stuff)\n\treturn sha1.hexdigest()", "def _hash_function(self, x):\n return hashlib.sha1(x).hexdigest()", "def calc_hash(self, record: dict) -> str:\n return sha1(orjson.dumps(record, option=orjson.OPT_SORT_KEYS)).hexdigest()", "def 
checksum_from_sha1(value):\n # More constrained regex at lexer level\n CHECKSUM_RE = re.compile('SHA1:\\\\s*([\\\\S]+)', re.UNICODE)\n match = CHECKSUM_RE.match(value)\n if match:\n return checksum.Algorithm(identifier='SHA1', value=match.group(1))\n else:\n return None", "def hash_1(self):\n return self.unpack_qword(0x18)", "def hash_data(obj):\n collect = sha1()\n for text in bytes_iter(obj):\n if isinstance(text, six.text_type):\n text = text.encode('utf-8')\n collect.update(text)\n return collect.hexdigest()", "def GetFileSha1(file_path):\n return base64.b64encode(GetFileHashes(file_path, do_sha1=True)['sha1'])", "def hash(self):\n return hashlib.sha1(str(self._dict))", "def do_hash(dat: typing.Any) -> str:\n return hashlib.sha1(json.dumps(dat, sort_keys=True).encode('utf-8')).hexdigest()", "def sha1(fname):\n fh = open(fname, 'rb')\n sha1 = hashlib.sha1()\n block = fh.read(2 ** 16)\n while len(block) > 0:\n sha1.update(block)\n block = fh.read(2 ** 16)\n\n return sha1.hexdigest()", "def get_sha1_from_stream(src: io.IOBase) -> str:\n if not isinstance(src, io.IOBase) or not src.readable():\n raise Exception(\"src is not stream or unreadable\")\n m: hashlib._hashlib.HASH = hashlib.sha1()\n return calc_hash(src, m)", "def _get_sha1(file_descriptor):\n sha1 = hashlib.sha1()\n for block in iter(partial(file_descriptor.read, BLOCK_SIZE), ''):\n sha1.update(block)\n file_descriptor.seek(0)\n return sha1.hexdigest()", "def nice_hash(*args):\n h = sha1()\n for item in args:\n h.update(unicode(item))\n return b32encode(h.digest())", "def hash(self) -> str:\r\n ...", "def hash(self, oid):\n data = self.family_name + self.name +\\\n self.date_of_birth + self.date_of_issue +\\\n self.date_of_expiry + self.issuing_country +\\\n self.issuing_authority + self.license_number +\\\n \"\".join(self.categories_of_vehicles) +\\\n str(self.number_of_entries)\n if oid == 'id-sha1':\n digest = hashes.Hash(hashes.SHA1(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha224':\n digest = hashes.Hash(hashes.SHA224(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha256':\n digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha384':\n digest = hashes.Hash(hashes.SHA384(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha512':\n digest = hashes.Hash(hashes.SHA512(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n else:\n print('ERROR: Hash algorithm not implemented.')\n sys.exit(1)", "def find_hash():\n document = request.get_json()\n document_data = str(document)\n sha = hashlib.sha256()\n sha.update(document_data.encode('utf-8'))\n hash_str = sha.hexdigest()\n hash_base58 = encode(hash_str)\n return jsonify(SEND_DATA({\"hash\": hash_base58}))", "def blob_hash(stream, size):\n hasher = sha1()\n hasher.update(('blob %u\\0' % size).encode('ascii'))\n nread = 0\n while True:\n # We read just 64K at a time to be kind to\n # runtime storage requirements.\n data = stream.read(65536)\n if data == b'':\n break\n nread += len(data)\n hasher.update(data)\n if nread != size:\n raise ValueError('%s: expected %u bytes, found %u bytes' %\n (stream.name, size, nread))\n return hasher", "def track_to_hash(track):\n return hashlib.sha1(track.encode('utf-8')).hexdigest()", "def computeHash(infile):\n f = open(infile, 'rb')\n buffer = 
f.read()\n f.close()\n return hashlib.sha1(buffer).hexdigest()", "def _calculate_hash(self) -> str:\n data_str = str(self.version) + str(self.index) + self.pre_hash + str(self.timestamp) + str(self.data)\n return sha256(data_str.encode('utf-8')).hexdigest()", "def hashAndSign(self, bytes):\r\n hashBytes = SHA1(bytearray(bytes))\r\n prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)\r\n sigBytes = self.sign(prefixedHashBytes)\r\n return sigBytes", "def hash(path):\n\n with open(path, 'r') as file:\n return hashlib.sha1(file.read()).hexdigest()", "def get_sha1(src: str) -> str:\n if not isinstance(src, str) or src == \"\":\n raise Exception(\"Invalid src str\")\n i = io.BytesIO(bytearray(src, encoding='utf-8'))\n return get_sha1_from_stream(i)", "def hash(password):\n result = hashlib.sha1(password.encode())\n # return a hexadecimal digits\n return result.hexdigest()", "def fnv1(self, key):\n # hash = 0xff\n hash = 0xcbf29ce484222325\n for n in key.encode():\n # print(n)\n hash = hash ^ n\n hash = hash * 0x100000001b3\n\n # print(hash)\n return hash", "def calchash(filename):\n sha = hashlib.sha1()\n with open(filename, 'rb') as f:\n sha.update(f.read())\n return sha", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def _calc_hash(self) -> None:\n self.image = Image.open(self.path)\n self.image = self.image.convert(\"L\")\n self.image = self.image.resize((self.width, self.height), Image.ANTIALIAS)\n lpixels = list(self.image.getdata())\n self.hash = \"0b\"\n for i, pixel in enumerate(lpixels):\n if (i + 1) % self.width == 0 and i != 0:\n continue\n if pixel < lpixels[i + 1]:\n self.hash += \"1\"\n continue\n self.hash += \"0\"\n self.hash_hex = DHash.bin2hex(self.hash)", "def h1(data1=None, data2=None, data3=None, data4=None, data5=None, data6=None):\n\n hsh = SHA512.new()\n hsh.update(b\"4\")\n hsh.update(data1)\n hsh.update(data2)\n hsh.update(data3)\n hsh.update(data4)\n hsh.update(data5)\n hsh.update(data6)\n return hsh", "def hashing(word) :\r\n ans = hashlib.sha256(word.encode())\r\n return ans.hexdigest()", "def get_hash(thing):\n n = hashlib.sha256()\n \n if isinstance(thing,str):\n n.update(thing.encode('utf-8' ))\n elif isinstance(thing, bytes):\n n.update(thing)\n elif isinstance(thing,BeautifulSoup):\n n.update(get_hash(str(thing)))\n else:\n raise RuntimeError(\"unknown type: {}\".format(str(type(thing))))\n \n return(n.digest())", "def _get_hash(self, *args):\n url_hash = hashlib.sha1()\n try:\n for value in args:\n value = unicode(value).encode('utf-8', 'replace')\n url_hash.update(value)\n return url_hash.hexdigest()\n except UnicodeDecodeError:\n return None", "def hash_file(filename):\r\n\r\n # make a hash object\r\n h = hashlib.sha1()\r\n\r\n # open file for reading in binary mode\r\n with open(filename,'rb') as file:\r\n\r\n # loop till the end of the file\r\n chunk = 0\r\n while chunk != 
b'':\r\n # read only 1024 bytes at a time\r\n chunk = file.read(1024)\r\n h.update(chunk)\r\n\r\n # return the hex representation of digest\r\n return h.hexdigest()", "def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()", "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha1()\n\n # open file for reading in binary mode\n with open(filename, 'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "def sha1Function():\r\n\r\n sha1Input = input(\"Enter SHA-1 String: \") # user input for hashing\r\n \r\n sha1Result = hashlib.sha1(sha1Input.encode()) # encoding user input then sending to sha1() function\r\n \r\n print(\"Hashing Successful\")\r\n print(\"The SHA-1 Hashing Result is : \", end =\"\") \r\n print(sha1Result.hexdigest()) # printing the hashing result in hexadecimal value\r\n\r\n menu() # display the menu again\r", "def calculateHash(self):\n hashData = str(self.index) + str(self.data) + self.timestamp + self.previousHash + str(self.nonce)\n return hashlib.sha256(hashData.encode(encoding='UTF-8',errors='strict')).hexdigest()", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def hash_me(cls, p_str, p_len=64):\n v_hash = str()\n v_len = EC.SHA256 if p_len is None else EC.SHA256 if p_len not in EC.HASH_ALGO else p_len\n if v_len == EC.SHA512:\n v_hash = hashlib.sha512()\n elif v_len == EC.SHA256:\n v_hash = hashlib.sha256()\n elif v_len == EC.SHA224:\n v_hash = hashlib.sha224()\n elif v_len == EC.SHA1:\n v_hash = hashlib.sha1()\n\n v_hash.update(p_str.encode(\"utf-8\"))\n return v_hash.hexdigest()", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def hash(space, w_object):\n return space.hash(w_object)", "def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def __hash__(self):\n return hash(self.text)", "def _build_hash(target, meta_type):\n digest = hashlib.sha1(target.encode('ascii')).hexdigest() # nosec\n return meta_type + digest", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def _minhash_from_text(self, text):\n minhash = MinHash(self._config.num_perm)\n for word in self._shingles_from_text(text):\n minhash.update(word.encode('utf8'))\n return minhash", "def HashAlgorithm(self) -> _n_7_t_0:", "def hashfile(filename):\n BLOCKSIZE = 65536\n sha1 = hashlib.sha1()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n sha1.update(buf)\n buf = afile.read(BLOCKSIZE)\n return(sha1.hexdigest())", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s)\n return h.hexdigest()", "def symlink_hash(path):\n hasher = sha1()\n data = path_to_bytes(os.readlink(path))\n hasher.update(('blob %u\\0' % 
len(data)).encode('ascii'))\n hasher.update(data)\n return hasher", "def hash_file ( filename ):\n sha1 = hashlib.sha1()\n with open( filename, 'rb' ) as f:\n while True:\n buf = f.read(65536) # read by 64kb buffers size\n if not buf:\n break\n sha1.update(buf)\n return sha1", "def _calculate_hash(self, file_object):\n hasher = self.hashlib()\n for chunk in self.iterchunks(file_object):\n hasher.update(chunk)\n return hasher.hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode())\n return h.hexdigest()", "def sha1sum(filename):\n with open(filename, mode='rb') as f:\n d = hashlib.sha1()\n for buf in iter(functools.partial(f.read, 1024*100), b''):\n d.update(buf)\n return d.hexdigest()", "def sha1sum(filename):\n if not os.path.isfile(filename):\n return ''\n hasher = hashlib.sha1()\n with open(filename, 'rb') as hash_file:\n buf = hash_file.read(HASH_BLOCK_SIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = hash_file.read(HASH_BLOCK_SIZE)\n return hasher.hexdigest()", "def setHash(self):\n chash_string = str(self.code) + str(\"CAMPAIGN\") + str(self.created_at)\n chash = hashlib.sha1()\n chash.update(chash_string)\n \n self.chash = chash.hexdigest()\n self.save()", "def make_hash(self):\n timestamp = str(int(round(time.time()*1000)))\n auth = b64encode(config.username) + ':' \\\n + b64encode(config.password) + ':' \\\n + b64encode(timestamp)\n rsa = RSA.load_pub_key(config.public_key)\n encrypted_auth = rsa.public_encrypt(auth, RSA.pkcs1_padding)\n key = b64encode(encrypted_auth)\n return key", "def do_hash_certificate(self, certificate):\n options = {'algorithm': 'URDNA2015', 'format': 'application/nquads', 'documentLoader': cached_document_loader}\n cert_utf8 = certificate.decode('utf-8')\n cert_json = json.loads(cert_utf8)\n normalized = jsonld.normalize(cert_json, options=options)\n hashed = sha256(normalized)\n self.tree.add_leaf(hashed, False)\n return hashed", "def __hash(self, oid):\n try:\n # str(oid) may contain non-ascii characters\n m = mmh3.hash(str(oid), signed=False)\n except Exception as e:\n m = 0\n return m", "def get_results_hash(self, data):\n data = json.dumps(data, sort_keys=True)\n result = hashlib.sha512(data.encode())\n result_hash = result.hexdigest()\n return result_hash", "def _HashFile(self, fd):\n hashes = fd.Get(fd.Schema.HASH)\n if hashes:\n found_all = True\n for fingerprint_type, hash_types in self.HASH_TYPES.iteritems():\n for hash_type in hash_types:\n if fingerprint_type == \"pecoff\":\n hash_type = \"pecoff_%s\" % hash_type\n if not hashes.HasField(hash_type):\n found_all = False\n break\n if not found_all:\n break\n if found_all:\n return hashes\n\n fingerprinter = fingerprint.Fingerprinter(fd)\n if \"generic\" in self.HASH_TYPES:\n hashers = self._GetHashers(self.HASH_TYPES[\"generic\"])\n fingerprinter.EvalGeneric(hashers=hashers)\n if \"pecoff\" in self.HASH_TYPES:\n hashers = self._GetHashers(self.HASH_TYPES[\"pecoff\"])\n if hashers:\n fingerprinter.EvalPecoff(hashers=hashers)\n\n if not hashes:\n hashes = fd.Schema.HASH()\n\n for result in fingerprinter.HashIt():\n fingerprint_type = result[\"name\"]\n for hash_type in self.HASH_TYPES[fingerprint_type]:\n if hash_type not in result:\n continue\n\n if hash_type == \"SignedData\":\n # There can be several certs in the same file.\n for signed_data in result[hash_type]:\n hashes.signed_data.Append(revision=signed_data[0],\n cert_type=signed_data[1],\n certificate=signed_data[2])\n continue\n\n # Set the hashes in the original object\n if fingerprint_type == \"generic\":\n 
hashes.Set(hash_type, result[hash_type])\n\n elif fingerprint_type == \"pecoff\":\n hashes.Set(\"pecoff_%s\" % hash_type, result[hash_type])\n\n else:\n logging.error(\"Unknown fingerprint_type %s.\", fingerprint_type)\n\n try:\n fd.Set(hashes)\n except IOError:\n pass\n return hashes", "def sha_new(arg=None):\r\n\r\n crypto = sha()\r\n if arg:\r\n crypto.update(arg)\r\n\r\n return crypto", "def digest(o):\n ser = serialize(o)\n return _truncated_digest(ser.encode(enc)).decode(enc)", "def get_report_hash(self, consolidated):\n jsonstr = json.dumps(consolidated, sort_keys=True)\n hashobj = hashlib.sha1(jsonstr)\n hexval = hashobj.hexdigest()\n return hexval", "def getHash(self, hashtype='sha1'):\n if not self.svghash256:\n blob_reader = blobstore.BlobReader(self.svgBlob)\n digest = hashlib.sha256(blob_reader.read()).digest()\n self.svghash256 = \"sha256-%s\" % (base64.b64encode(digest))\n self.put() # write back hash\n if not self.svghash:\n blob_reader = blobstore.BlobReader(self.svgBlob)\n digest = hashlib.sha1(blob_reader.read()).digest()\n self.svghash = \"sha1-%s\" % (base64.b64encode(digest))\n self.put() # write back hash\n if hashtype=='sha1':\n return \"%s\" % (self.svghash)\n elif hashtype == 'sha256':\n return \"%s\" % (self.svghash256)\n elif hashtype == 'both':\n return \"%s %s\" % (self.svghash,self.svghash256)", "def digest(self):\r\n\r\n H0 = self.H0\r\n H1 = self.H1\r\n H2 = self.H2\r\n H3 = self.H3\r\n H4 = self.H4\r\n inputdata = [] + self.inputdata\r\n count = [] + self.count\r\n\r\n index = (self.count[1] >> 3) & 0x3fL\r\n\r\n if index < 56:\r\n padLen = 56 - index\r\n else:\r\n padLen = 120 - index\r\n\r\n padding = ['\\200'] + ['\\000'] * 63\r\n self.update(padding[:padLen])\r\n\r\n # Append length (before padding).\r\n bits = _sha_bytelist2longBigEndian(self.inputdata[:56]) + count\r\n\r\n self._transform(bits)\r\n\r\n # Store state in digest.\r\n digest = _sha_long2bytesBigEndian(self.H0, 4) + \\\r\n _sha_long2bytesBigEndian(self.H1, 4) + \\\r\n _sha_long2bytesBigEndian(self.H2, 4) + \\\r\n _sha_long2bytesBigEndian(self.H3, 4) + \\\r\n _sha_long2bytesBigEndian(self.H4, 4)\r\n\r\n self.H0 = H0 \r\n self.H1 = H1 \r\n self.H2 = H2\r\n self.H3 = H3\r\n self.H4 = H4\r\n self.inputdata = inputdata \r\n self.count = count \r\n\r\n return digest", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def _calculate_hash(self, entry):\n entry.pop('id', None)\n return hashlib.sha224(json.dumps(\n entry, cls=DjangoJSONEncoder).encode('utf-8')).hexdigest()", "def hash(cls, path, digest=None, hasher=sha1):\r\n if digest is None:\r\n digest = hasher()\r\n with open(path, 'rb') as fh:\r\n cls.update_hash(fh, digest)\r\n return digest.hexdigest()", "def FNV1Hash(filename):\n \n FNV1_32_INIT = 0x811c9dc5\n FNV1_PRIME_32 = 16777619\n\n lowerName = filename.lower()\n \n _hash = FNV1_32_INIT\n uint32_max = 2 ** 32\n \n for c in lowerName:\n _hash = (_hash * FNV1_PRIME_32) % uint32_max\n _hash = _hash ^ ord(c)\n return format(_hash, 'x')", "def compute_hash(fileName):\n m = hashlib.sha1()\n try:\n fd = open(fileName,\"rb\")\n except IOError:\n print (\"Unable to open the file in readmode:\", fileName)\n return\n content = fd.readlines()\n fd.close()\n for eachLine in content:\n m.update(eachLine)\n return m.hexdigest()", "def 
GenerateHash(params):\n exp_params = params.ConvertToDict()\n return hashlib.sha1(\n repr(sorted(exp_params.items())).encode('utf-8')).hexdigest()" ]
[ "0.77072245", "0.7234007", "0.69447297", "0.6892159", "0.6755166", "0.6729288", "0.67109865", "0.6634047", "0.6630094", "0.65769774", "0.6461438", "0.6441741", "0.6404586", "0.63657", "0.6312148", "0.6248096", "0.62417644", "0.6229102", "0.6181597", "0.617851", "0.61565894", "0.61539555", "0.6150922", "0.613116", "0.61292875", "0.6075815", "0.6070594", "0.6068168", "0.6063443", "0.60604566", "0.60603315", "0.604533", "0.596239", "0.5944336", "0.59275955", "0.5916693", "0.590983", "0.5897342", "0.5882652", "0.58789694", "0.5877525", "0.58721876", "0.5859465", "0.5857351", "0.5849", "0.5846277", "0.58434844", "0.5830153", "0.5793363", "0.5777711", "0.5777006", "0.5777006", "0.5771952", "0.5767196", "0.57564825", "0.57558733", "0.5754588", "0.5750209", "0.57451636", "0.5722006", "0.57063895", "0.5702442", "0.57013714", "0.56845593", "0.56799436", "0.56799436", "0.56694794", "0.5668228", "0.56627214", "0.56399775", "0.56396866", "0.563733", "0.561654", "0.56084037", "0.56003803", "0.559693", "0.5590607", "0.55840737", "0.5581851", "0.55710256", "0.55560213", "0.5535217", "0.5531966", "0.5531881", "0.5513924", "0.55058366", "0.5505334", "0.5493309", "0.54912275", "0.54909503", "0.5486211", "0.54814714", "0.54812104", "0.5479956", "0.5479656", "0.5476515", "0.54760385", "0.5474334", "0.5468605", "0.5454126" ]
0.6487641
10
Create signature and save it into data/signature.txt
def sign(file_name: str) -> None:
    print("Signing the file...")
    file_name = os.path.join('data', file_name)
    file1 = open("data/key.txt", "r")
    file2 = open("data/secret_key.txt", "r")
    p = int(file1.readline().rstrip())
    q = int(file1.readline().rstrip())
    g = int(file1.readline().rstrip())
    h = int(file1.readline().rstrip())
    a = int(file2.readline().rstrip())
    loop = True
    while loop:
        r = random.randint(1, q - 1)
        c1 = square_multiply(g, r, p)
        c1 = c1 % q
        c2 = sha_hash(file_name) + (a * c1)
        rinverse = compute_inverse(r, q)
        c2 = (c2 * rinverse) % q
        if c1 != 0 and c2 != 0:
            loop = False
    print('hash = ', sha_hash(file_name))
    print('c1 = ', c1)
    print('c2 = ', c2)
    file = open("data/signature.txt", "w")
    file.write(str(c1))
    file.write("\n")
    file.write(str(c2))
    print("cipher stored at signature.txt")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_signature(self):\n self.db_file.write(b\"\\x4b\\x57\\x44\\x42\\x00\\x01\\x03\\x01\")", "def send_signature(self, update, context):\n msg_file = 'signature_msg.txt'\n self.send_textfile(msg_file, update, context)", "def GenSampleSignature(text):\r\n demo_keypair = ('RSA.mVgY8RN6URBTstndvmUUPb4UZTdwvwmddSKE5z_jvKUEK6yk1'\r\n 'u3rrC9yN8k6FilGj9K0eeUPe2hf4Pj-5CmHww=='\r\n '.AQAB'\r\n '.Lgy_yL3hsLBngkFdDw1Jy9TmSRMiH6yihYetQ8jy-jZXdsZXd8V5'\r\n 'ub3kuBHHk4M39i3TduIkcrjcsiWQb77D8Q==')\r\n\r\n signer = SignatureAlgRsaSha256(demo_keypair)\r\n return signer.Sign(text)", "def sign_file_dialog():\n if X is None or Y is None:\n raise Exception(ERRORS.INVALID_AUTH)\n\n file_path = input(\"Enter file path: \")\n signature_name = input(\"Enter signature identity: \")\n\n for c in signature_name:\n ascii_c = ord(c)\n if not ((ascii_c >= 65 and ascii_c <= 90) or (ascii_c >= 97 and ascii_c <= 122) or (ascii_c >= 48 and ascii_c <= 57) or ascii_c == 95):\n raise Exception(ERRORS.INVALID_SIGNATURE_NAME)\n\n if not os.path.exists(file_path):\n raise Exception(ERRORS.INVALID_FILE)\n \n with open(file_path, \"rb\") as file:\n file_hash = hashlib.sha256(file.read()).hexdigest()\n file_hash_int = int(file_hash, 16)\n \n k = random.randint(1, Q-1)\n r = 0\n while r==0:\n r = int(pow(G, k, P)) % Q\n s = (pow(k,Q-2,Q)*(file_hash_int + X%Q*r%Q)%Q) % Q\n\n with open(f\"{signature_name}.sig\", \"w\") as file:\n file.write(str(r) + \"\\n\")\n file.write(str(s) + \"\\n\")\n \n print(f\"Signature {signature_name}.sig created successfully.\")", "def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id", "def sign(key, file, sign):\n\n try:\n key = TomlKeyFormatter().from_string(key.read())\n signature = Signature.sign(SignableBinaryIO(file), Md5, key)\n\n sign.write(TomlSignatureFormatter().to_string(signature))\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")", "def create_token(filename):\n\n try:\n os.makedirs(os.path.dirname(filename))\n except Exception:\n pass\n\n sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)\n vk = sk.verifying_key\n if vk is not None:\n line = encode_line(\"signing-key\", sk.to_der(), vk.to_der())\n\n with open(filename, \"w\") as f:\n f.write(line)", "def __sign(self, text):\n signature = HMAC.new(self.sign_key, text.encode('utf-8'), SHA256).digest()\n return base64.standard_b64encode(signature)", "def create_signature(self, string_to_sign: str) -> str:\n begin_signature = hmac.new(key=base64.b64decode(self.secret),\n msg=string_to_sign.encode(),\n digestmod=hashlib.sha1)\n end_signature = begin_signature.digest()\n final_signature = base64.b64encode(end_signature).decode()\n return final_signature", "def make_signature(self, data, classes=None): \n raise NotImplementedError", "def get_signature_xml() -> str:\n return render_to_string(\"saml/xml/signature.xml\", {})", "def add_sign(self):\n if self.is_signed():\n self.remove_sign()\n \n data = self._document.read()\n encrypted = self._encryptor.encrypt_cbc(data, self._init_vector)\n hash_value = encrypted[-16:]\n self._document.write(self._seperator.encode() + hash_value + 
self._seperator.encode())\n print(\"The document is signed!\")", "def upgrade_savefile(fn):\n\n if signing_keys is None:\n return\n\n atime = os.path.getatime(fn)\n mtime = os.path.getmtime(fn)\n\n with zipfile.ZipFile(fn, \"a\") as zf:\n\n if \"signatures\" in zf.namelist():\n return\n\n log = zf.read(\"log\")\n zf.writestr(\"signatures\", sign_data(log))\n\n os.utime(fn, (atime, mtime))", "def gen_sig():\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()", "def test_signature_verification(self):\n curdir = os.path.dirname(os.path.abspath(__file__))\n keydir = os.path.join(curdir, \"data\", \"ima_keys\")\n\n lines = SIGNATURES.split('\\n')\n\n # empty keyring\n keyring = ima_file_signatures.ImaKeyring()\n self.assertTrue(ima.process_measurement_list(lines, ima_keyring=keyring) is None)\n\n # add key for 1st entry; 1st entry must be verifiable\n rsakeyfile = os.path.join(keydir, \"rsa2048pub.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(rsakeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:1], ima_keyring=keyring) is not None)\n self.assertTrue(ima.process_measurement_list(lines[1:2], ima_keyring=keyring) is None)\n\n # add key for 2nd entry; 1st & 2nd entries must be verifiable\n eckeyfile = os.path.join(keydir, \"secp256k1.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(eckeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:2], ima_keyring=keyring) is not None)", "def sign(filename: str) -> str:\n fs, relative_path = url_to_fs(filename)\n try:\n return cast(str, fs.sign(relative_path))\n except NotImplementedError:\n return filename", "def sign(file, outfile):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if file and file != \"-\":\n if not os.path.isfile(file):\n raise Exception(\"File %s does not exist!\" % file)\n with open(file) as fp:\n tx = fp.read()\n if tx.find('\\0') > 0:\n with open(file, encoding='utf-16') as fp:\n tx = fp.read()\n else:\n tx = click.get_text_stream('stdin')\n tx = ast.literal_eval(tx)\n tx = mph.sign(tx, reconstruct_tx=False)\n tx = json.dumps(tx, indent=4)\n if outfile and outfile != \"-\":\n with open(outfile, 'w') as fp:\n fp.write(tx)\n else:\n print(tx)", "def create_samfile(self):", "def sign(self, body, external_aad, private_key):", "def test_create_image_signature(self):\n pass", "def sign(self, data):\n\n key_private = RsaPrivateKey.Read(self.sign_private)\n signature = key_private.Sign(data)\n return b64encode(signature)", "def signature(request) -> str:\n return get_test_data(request, __name__, \"signature\", \"r\")", "def make_signature(self, data, classes=None):\n N = len(self.genes)\n self.signatures = ((N-1)*self.signatures + data[:,self.added])/N", "def sign_data(data):\n\n rv = \"\"\n\n for i in signing_keys:\n sk = ecdsa.SigningKey.from_der(i)\n\n if sk is not None and sk.verifying_key is not None:\n sig = sk.sign(data)\n rv += encode_line(\"signature\", sk.verifying_key.to_der(), sig)\n\n return rv", "def sign_file(filename, key_file, cert_file, password_fd):\n data = file(filename).read()\n signed_binary = sign_data(data, key_file, password_fd)\n cert_data = file(cert_file).read()\n\n # Save certificate\n file('%s.%s' % (filename, EXT_CERT), 'w').write(cert_data)\n\n # Save signed data\n file('%s.%s' % (filename, 
EXT_SIGN), 'w').write(signed_binary)", "def write_sigmf(data_file, data, buffer=None, append=True):\n\n packed = pack_bin(data)\n\n write_bin(data_file, packed, buffer, append)", "def checksignature(self):\n if(self.name=='ORBIT'): return\n if(self.ctpnum==0): return\n cmd=\"CheckSignature(\"+self.board+\",\"+self.signature+\",\"+self.ctpnum+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n print \"input checksignature: \",output\n #self.signatureM=", "def sign(self):\r\n self._reset()\r\n if hasattr(self, \"_privateKey\"):\r\n if \"fee\" not in self:\r\n setFees(self)\r\n if self.type == 4:\r\n missings = \\\r\n self.asset[\"multiSignature\"][\"min\"] - \\\r\n len(self.get(\"signature\", []))\r\n if missings:\r\n raise Exception(\"owner signature missing (%d)\" % missings)\r\n self[\"signature\"] = dposlib.core.crypto.getSignature(\r\n self, self._privateKey\r\n )\r\n else:\r\n raise Exception(\"orphan transaction can not sign itsef\")", "def sign(self, data):\n from base64 import urlsafe_b64encode\n\n if self.sign_private == \"\":\n raise ValueError(\"Error signing: No private signing key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.sign_private)\n signature = key_private.Sign(data)\n return urlsafe_b64encode(signature)", "def saveToFile (self, filename=\"pub.key\"):\n key_file = open(filename, \"w\")\n key_file.write(str(self.n) + \";\" + str(self.n_sq) + \";\" + str(self.g))\n key_file.close()", "def PCTSignatures_create(initSampleCount=None, initSeedCount=None, pointDistribution=None): # real signature unknown; restored from __doc__\n pass", "def create_signature(auth_scheme, api_key_secret, signing_data, timestamp, nonce):\n if auth_scheme == 'VERACODE-HMAC-SHA-256':\n signature = create_hmac_sha_256_signature(api_key_secret, signing_data, timestamp, nonce)\n else:\n raise UnsupportedAuthSchemeException('Auth scheme {auth_scheme} not supported'.format(auth_scheme=auth_scheme))\n return signature", "def sign(self, data: bytes) -> bytes:\n return self._signing_key.sign(data).signature", "def serialize_to_signature(cls, value):\n raise NotImplementedError", "def sigfile(fpath):\n sigsha = hashlib.sha1()\n fbj = open(fpath, 'rb')\n try:\n sigsha.update(fbj.read()) # pylint: disable-msg=E1101\n finally:\n fbj.close()\n return sigsha.hexdigest()", "def sendToSign(cmd):\n\tsubprocess.call([\"./sign.sh\", cmd])", "def _generate_signature(self, key, msg):\n key = to_bytes(key)\n msg = to_bytes(msg)\n\n hash_obj = hmac.new(key, msg=msg, digestmod=hashlib.sha256)\n digest = hash_obj.digest() # abstract\n\n signature = base64.b64encode(digest) # Signature\n return to_unicode(signature)", "def create_signature(content, keyid=None, homedir=None, timeout=GPG_TIMEOUT):\n if not have_gpg(): # pragma: no cover\n raise exceptions.UnsupportedLibraryError(NO_GPG_MSG)\n\n if not CRYPTO: # pragma: no cover\n raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)\n\n keyarg = \"\"\n if keyid:\n formats.KEYID_SCHEMA.check_match(keyid)\n keyarg = (\n \"--local-user {}\".format( # pylint: disable=consider-using-f-string\n keyid\n )\n )\n\n homearg = \"\"\n if homedir:\n homearg = (\n \"--homedir {}\".format( # pylint: disable=consider-using-f-string\n homedir\n ).replace(\"\\\\\", \"/\")\n )\n\n command = gpg_sign_command(keyarg=keyarg, homearg=homearg)\n\n gpg_process = subprocess.run( # nosec\n command,\n input=content,\n check=False,\n capture_output=True,\n timeout=timeout,\n )\n\n # TODO: It's suggested to take a look at `--status-fd` for proper error\n # 
reporting, as there is no clear distinction between the return codes\n # https://lists.gnupg.org/pipermail/gnupg-devel/2005-December/022559.html\n if gpg_process.returncode != 0:\n raise CommandError(\n \"Command '{}' returned \" # pylint: disable=consider-using-f-string\n \"non-zero exit status '{}', stderr was:\\n{}.\".format(\n gpg_process.args,\n gpg_process.returncode,\n gpg_process.stderr.decode(),\n )\n )\n\n signature_data = gpg_process.stdout\n signature = parse_signature_packet(signature_data)\n\n # On GPG < 2.1 we cannot derive the full keyid from the signature data.\n # Instead we try to compute the keyid from the public part of the signing\n # key or its subkeys, identified by the short keyid.\n # parse_signature_packet is guaranteed to return at least one of keyid or\n # short_keyid.\n # Exclude the following code from coverage for consistent coverage across\n # test environments.\n if not signature[\"keyid\"]: # pragma: no cover\n log.warning(\n \"The created signature does not include the hashed subpacket\" # pylint: disable=logging-format-interpolation,consider-using-f-string\n \" '33' (full keyid). You probably have a gpg version <{}.\"\n \" We will export the public keys associated with the short keyid to\"\n \" compute the full keyid.\".format(FULLY_SUPPORTED_MIN_VERSION)\n )\n\n short_keyid = signature[\"short_keyid\"]\n\n # Export public key bundle (master key including with optional subkeys)\n public_key_bundle = export_pubkey(short_keyid, homedir)\n\n # Test if the short keyid matches the master key ...\n master_key_full_keyid = public_key_bundle[\"keyid\"]\n if master_key_full_keyid.endswith(short_keyid.lower()):\n signature[\"keyid\"] = master_key_full_keyid\n\n # ... or one of the subkeys, and add the full keyid to the signature dict.\n else:\n for sub_key_full_keyid in list(\n public_key_bundle.get(\"subkeys\", {}).keys()\n ):\n if sub_key_full_keyid.endswith(short_keyid.lower()):\n signature[\"keyid\"] = sub_key_full_keyid\n break\n\n # If there is still no full keyid something went wrong\n if not signature[\"keyid\"]: # pragma: no cover\n raise ValueError(\n \"Full keyid could not be determined for signature '{}'\".format( # pylint: disable=consider-using-f-string\n signature\n )\n )\n\n # It is okay now to remove the optional short keyid to save space\n signature.pop(\"short_keyid\", None)\n\n return signature", "def create_submission(y_hat_click, y_hat_sale=None, filename: str = None, description: str = None):\n np.savetxt(Y_HAT_CLICK_FN, y_hat_click, fmt='%1.6f')\n if y_hat_sale is not None:\n np.savetxt(Y_HAT_SALE_FN, y_hat_sale, fmt='%1.6f')\n if filename is None:\n filename = 'submissions/submission-%s.zip' % str(datetime.now()).replace(' ', '_').replace(':', '-')\n with ZipFile(filename, 'w') as zip:\n zip.write(Y_HAT_CLICK_FN)\n if y_hat_sale is not None:\n zip.write(Y_HAT_SALE_FN)\n if description is not None and len(description):\n zip.writestr('description', description)\n print('wrote', filename)\n return filename", "def get_signature(self):\n return \" \".join(self.segments[-1].unixtext.replace(\n u\"\\n\", \" \").strip().split())", "def write_scram_toolfile(self, contents, filename):\n with open(self.spec.prefix.etc + '/scram.d/' + filename, 'w') as f:\n f.write(contents)\n f.close()", "def gen_sig(key, data):\n signature = hmac.new(key.encode('utf-8'), data.encode('utf-8'), hashlib.sha1)\n\n sig = signature.digest()\n # base64 encode\n b64 = base64.b64encode( sig)\n # url encode\n return b64", "def get_synapse_signing_key(self):\n if not 
path.exists(self.synapse_signing_key_file):\n key_id = \"a_\" + self.random_string(4)\n key_content = generate_signing_key(key_id)\n with open(self.synapse_signing_key_file, \"w+\") as key_file:\n write_signing_keys(key_file, (key_content,))\n return self.synapse_signing_key_file", "def generate_shared_access_signature(\n self, permission=None, # type: Optional[Union[FilePermissions, str]]\n expiry=None, # type: Optional[Union[datetime, str]]\n start=None, # type: Optional[Union[datetime, str]]\n policy_id=None, # type: Optional[str]\n ip=None, # type: Optional[str]\n protocol=None, # type: Optional[str]\n cache_control=None, # type: Optional[str]\n content_disposition=None, # type: Optional[str]\n content_encoding=None, # type: Optional[str]\n content_language=None, # type: Optional[str]\n content_type=None # type: Optional[str]\n ):\n # type: (...) -> str\n if not hasattr(self.credential, 'account_key') or not self.credential.account_key:\n raise ValueError(\"No account SAS key available.\")\n sas = FileSharedAccessSignature(self.credential.account_name, self.credential.account_key)\n if len(self.file_path) > 1:\n file_path = '/'.join(self.file_path[:-1])\n else:\n file_path = None # type: ignore\n return sas.generate_file( # type: ignore\n self.share_name,\n file_path,\n self.file_name,\n permission,\n expiry,\n start=start,\n policy_id=policy_id,\n ip=ip,\n protocol=protocol,\n cache_control=cache_control,\n content_disposition=content_disposition,\n content_encoding=content_encoding,\n content_language=content_language,\n content_type=content_type)", "def write(self, path=None, sync=True):\n path = path or self.signatures_file\n directory = os.path.split(path)[0]\n\n if sync and os.path.exists(path):\n # reload and save if file exists\n with open(path, \"r\") as f:\n lock_file(f)\n try:\n sigs = json.load(f)\n finally:\n unlock_file(f)\n\n sigs.update(\n self.signatures\n ) # reload file and merge cached sigs into what we load from file\n self.signatures = sigs\n\n if directory and not os.path.exists(directory):\n os.makedirs(directory) # create folder structure if not existS\n\n if not os.path.exists(path): # creates signatures.json file if it doesn't exist\n open(path, \"w\").close()\n\n with open(path, \"r+\") as f: # placing 'w+' here will result in race conditions\n lock_file(f, exclusive=True)\n try:\n json.dump(self.signatures, f)\n finally:\n unlock_file(f)\n\n return self", "def test_sign_file_unsupported_format(dummy_command, tmp_path, capsys):\n # FIXME: I'm not sure how to manufacture this in practice.\n dummy_command.tools.subprocess.run.side_effect = mock_codesign(\n \"unsupported format for signature\"\n )\n\n # Sign the file\n dummy_command.sign_file(\n tmp_path / \"base_path\" / \"random.file\",\n identity=\"Sekrit identity (DEADBEEF)\",\n )\n\n # An attempt to codesign was made\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(\n tmp_path,\n tmp_path / \"base_path\" / \"random.file\",\n entitlements=False,\n ),\n ],\n any_order=False,\n )\n\n # The console includes a warning about not needing a signature.\n assert \"... 
no signature required\\n\" in capsys.readouterr().out", "def generate_signed_message(method, headers_dict, body_dict, access_key, secret_key):\r\n message = signing_format_message(method, headers_dict, body_dict)\r\n\r\n # hmac needs a byte string for it's starting key, can't be unicode.\r\n hashed = hmac.new(secret_key.encode('utf-8'), message, sha256)\r\n signature = binascii.b2a_base64(hashed.digest()).rstrip('\\n')\r\n authorization_header = \"SSI {}:{}\".format(access_key, signature)\r\n\r\n message += '\\n'\r\n return message, signature, authorization_header", "def test_signature_validation(self):\n signature = app.utils.generate_signed_data(\n self._body,\n settings.PRIVATE_KEY\n )\n\n self.assertTrue(app.utils.validate_signed_data(\n self._body,\n signature,\n settings.PUBLIC_KEY\n ))", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def generate_cybersource_sa_signature(payload):\n # This is documented in certain CyberSource sample applications:\n # http://apps.cybersource.com/library/documentation/dev_guides/Secure_Acceptance_SOP/html/wwhelp/wwhimpl/js/html/wwhelp.htm#href=creating_profile.05.6.html\n keys = payload[\"signed_field_names\"].split(\",\")\n message = \",\".join(f\"{key}={payload[key]}\" for key in keys)\n\n digest = hmac.new(\n settings.CYBERSOURCE_SECURITY_KEY.encode(\"utf-8\"),\n msg=message.encode(\"utf-8\"),\n digestmod=hashlib.sha256,\n ).digest()\n\n return b64encode(digest).decode(\"utf-8\")", "def save(self, filename):\n aead_f = open(filename, \"wb\")\n fmt = \"< B I %is %is\" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE, len(self.data))\n version = 1\n packed = struct.pack(fmt, version, self.key_handle, self.nonce, self.data)\n aead_f.write(YHSM_AEAD_File_Marker + packed)\n aead_f.close()", "def generate_signature(payload):\n gemini_api_secret = get_secret_key()\n t = datetime.now()\n payload[\"nonce\"] = str(int(mktime(t.timetuple())*1000) + get_nonce())\n encoded_payload = dumps(payload).encode()\n b64 = b64encode(encoded_payload)\n signature = new(gemini_api_secret, b64, sha384).hexdigest()\n update_session(\"X-GEMINI-PAYLOAD\", b64)\n update_session(\"X-GEMINI-SIGNATURE\", signature)\n increment_nonce()", "def create_signature(self, privkey, reedem_script):\n seckey = CIoncoinSecret.from_secret_bytes(x(ioncointools.encode_privkey(privkey, \"hex\")))\n signatures = []\n for i in range(len(self.tx.vin)):\n sighash = SignatureHash(CScript(x(reedem_script)), self.tx, i, SIGHASH_ALL)\n signatures.append({\n \"index\": i,\n \"signature\": (seckey.sign(sighash) + struct.pack('<B', SIGHASH_ALL)).encode(\"hex\"),\n \"outpoint\": b2lx(self.tx.vin[i].prevout.hash) + b2lx(struct.pack(b\"<I\", self.tx.vin[i].prevout.n))\n })\n return signatures", "def sign(self, payload):\n raise NotImplementedError", "def sign(cls, upload, location=None):\n path = \"uploader/sign/%s\" % upload[\"id\"]\n kwargs = {\"md5\": upload[\"md5\"], \"location\": location}\n try:\n return Backend.put(path, kwargs, headers=Backend.headers())\n except requests.HTTPError as err:\n if err.response.status_code == 410:\n LOGGER.warning(\"Cannot Touch file %s. 
Already finished \\\n (not active) (410)\", upload[\"id\"])\n raise err\n except:\n raise", "def make_signature(secret: VersionedSecret, message: str, max_age: datetime.timedelta) -> bytes:\n version = 1\n expiration = int(time.time() + max_age.total_seconds())\n header = _HEADER_FORMAT.pack(version, expiration)\n digest = _compute_digest(secret.current, header, message)\n return base64.urlsafe_b64encode(header + digest)", "def output_signature(self):\n return _spacegrant_swig.binary_sink_sptr_output_signature(self)", "def _sign_document(self):\n return False", "def build_signature(inputs, outputs):\r\n signature_inputs = {\r\n key: saved_model_utils.build_tensor_info(tensor)\r\n for key, tensor in inputs.items()\r\n }\r\n signature_outputs = {\r\n key: saved_model_utils.build_tensor_info(tensor)\r\n for key, tensor in outputs.items()\r\n }\r\n\r\n signature_def = signature_def_utils.build_signature_def(\r\n signature_inputs, signature_outputs,\r\n signature_constants.PREDICT_METHOD_NAME)\r\n\r\n return signature_def", "def util_sign_release():\n os.chdir(REPO_PATH)\n dr = DebRepo()\n keyname = dr.read_keyname()\n out, err = dr.sign_release(keyname)\n print(out)\n print(err)", "def _generate_signature(self):\n self.logger.debug(f'body payload {self.body_payload}')\n return hmac.new(self.__decrypted_secret, self.body_payload, hashlib.sha1).hexdigest()", "def add_sig(self, s):\n self.sigs += ' ' * self.indent + s + '\\n'", "def sign(private_key: RsaKey, content: dict) -> None:\n\n signer = PKCS1_v1_5.new(private_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n signature = signer.sign(h)\n\n return binascii.hexlify(signature).decode('ascii')", "def bleu_signature(args, numrefs):\n\n # Abbreviations for the signature\n abbr = {\n 'test': 't',\n 'lang': 'l',\n 'smooth': 's',\n 'case': 'c',\n 'tok': 'tok',\n 'numrefs': '#',\n 'version': 'v',\n 'origlang': 'o',\n 'subset': 'S',\n }\n\n signature = {'tok': args.tokenize,\n 'version': VERSION,\n 'smooth': args.smooth,\n 'numrefs': numrefs,\n 'case': 'lc' if args.lc else 'mixed'}\n\n # For the Japanese tokenizer, add a dictionary type and its version to the signature.\n if args.tokenize == \"ja-mecab\":\n signature['tok'] += \"-\" + TokenizeMeCab().signature()\n\n if args.test_set is not None:\n signature['test'] = args.test_set\n\n if args.langpair is not None:\n signature['lang'] = args.langpair\n\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])\n\n return sigstr", "def sign(self, message, randombytes=urandom):\r\n int_header = 0x30 + logn[self.n]\r\n header = int_header.to_bytes(1, \"little\")\r\n\r\n salt = randombytes(SALT_LEN)\r\n hashed = self.hash_to_point(message, salt)\r\n\r\n # We repeat the signing procedure until we find a signature that is\r\n # short enough (both the Euclidean norm and the bytelength)\r\n '''\r\n print(\"---------Inside sign----------\")\r\n '''\r\n while(1):\r\n if (randombytes == urandom):\r\n s = self.sample_preimage(hashed)\r\n '''\r\n print(\"s: \", s)\r\n '''\r\n else:\r\n seed = randombytes(SEED_LEN)\r\n s = self.sample_preimage(hashed, seed=seed)\r\n norm_sign = sum(coef ** 2 for coef in s[0])\r\n norm_sign += sum(coef ** 2 for coef in s[1])\r\n # Check the Euclidean norm\r\n if norm_sign <= self.signature_bound:\r\n\r\n enc_s = compress(s[1], 
self.sig_bytelen - HEAD_LEN - SALT_LEN)\r\n # Check that the encoding is valid (sometimes it fails)\r\n if (enc_s is not False):\r\n return header + salt + enc_s\r\n '''\r\n else:\r\n print(\"-------------INVALID encoding---------------\")\r\n\r\n else:\r\n print(\"-------------NOT within signature bound---------------\")\r\n '''", "def _buildSignatureString(self):\n self.params=self.kargs\n \n try: method_details=self.MMAP[self.method]\n except: \n raise RuntimeError(\"unsupported method\")\n \n api_key_required=method_details[\"api_key_required\"]\n if api_key_required:\n self.params.update({\"api_key\": self.API_KEY, \"method\":self.method})\n \n signature_required=method_details[\"signature_required\"]\n if not signature_required:\n self.signature_string=\"\"\n return\n \n sorted_keys=self.params.keys().sort()\n \n str=\"\"\n try:\n for key in sorted_keys:\n if key not in self.PARAMS_TO_EXCLUDE_FROM_SIGNATURE:\n \n ## assume the parameter's value is valid\n try: \n if key not in self.PARAMS_TO_EXCLUDE_FROM_UTF8_ENCODING:\n value=self.params[key].encode(\"utf-8\")\n else:\n value=self.params[key]\n except: value=self.params[key]\n str=\"%s%s\" % (key, value)\n except:\n pass\n \n str += self.API_SECRET\n m=hashlib.md5()\n m.update(str)\n self.signature_string=m.hexdigest()\n \n self.kargs.update({\"api_sig\": self.signature_string})", "def insert_tx_signature(tx, index, signature, public_key):\n tx_obj = deserialize(tx)\n tx_obj[\"ins\"][index][\"script\"] = serialize_script([signature, public_key])\n\n return serialize(tx_obj)", "def _sign(self, path, nonce, data):\n url = '/{0}/{1}'.format(self._version, path)\n urlencoded_data = urllib.urlencode(data)\n msg = url + hashlib.sha256(str(nonce) + urlencoded_data).digest()\n signature = hmac.new(base64.b64decode(self._secret), msg,\n hashlib.sha512)\n return base64.b64encode(signature.digest())", "def add_signature(self, pubkey: PublicKey, signature: Signature) -> None:\n presigner = Presigner(pubkey.to_solders(), signature)\n self._solders.partial_sign([presigner], self._solders.message.recent_blockhash)", "def test_sign_file_entitlements(dummy_command, tmp_path):\n # Sign the file with an ad-hoc identity\n dummy_command.sign_file(\n tmp_path / \"base_path\" / \"random.file\",\n identity=\"Sekrit identity (DEADBEEF)\",\n entitlements=tmp_path\n / \"base_path\"\n / \"build\"\n / \"first-app\"\n / \"macos\"\n / \"app\"\n / \"Entitlements.plist\",\n )\n\n # An attempt to codesign was made without the runtime option\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(tmp_path, tmp_path / \"base_path\" / \"random.file\"),\n ],\n any_order=False,\n )", "def aws_signature(bucket,keypath,expires,secret_access_key=''):\n sign_msg = ('GET\\n\\n\\n'+expires+'\\n' +'/'+bucket+'/'+keypath)\n h = hmac.new(secret_access_key, sign_msg, hashlib.sha1)\n signature = urllib.quote(base64.b64encode(h.digest()))\n return (signature,sign_msg)", "def sign_blob(blob, deadline=None):\n # app_identity.sign_blob is producing RSA+SHA256 signature. Sadly, it isn't\n # documented anywhere. 
But it should be relatively stable since this API is\n # used by OAuth2 libraries (and so changing signature method may break a lot\n # of stuff).\n return app_identity.sign_blob(blob, deadline)", "def signature(self, params):\n string = ''.join(key + params[key] for key in sorted(params.keys()))\n return md5(string + self.cfg('secret'))", "def saveToFile (self, filename=\"priv.key\"):\n key_file = open(filename, \"w\")\n key_file.write(str(self.lamb) + \";\" + str(self.mu))\n key_file.close()", "def write(self, filename=None):\n if filename is None:\n filename = self.filename\n else:\n self.filename = filename\n self.header[\"sha1sum\"] = self._get_checksum(self.format())\n with open(filename, \"w+\") as handle:\n handle.write(self.format())", "def verify_signature_dialog():\n signature_name = input(\"Enter signature identity: \")\n file_path = input(\"Enter file path: \")\n user = input(\"Enter username: \")\n\n if not(os.path.exists(user)):\n raise Exception(ERRORS.NOT_FOUND_USER)\n if not(os.path.exists(f\"{signature_name}.sig\")):\n raise Exception(ERRORS.NOT_FOUND_SIGNATURE)\n if not(os.path.exists(file_path)):\n raise Exception(ERRORS.NOT_FOUND_FILE)\n\n with open(user, \"r\") as file:\n _ = int(file.readline())\n y = int(file.readline())\n with open(f\"{signature_name}.sig\", \"r\") as file:\n r = int(file.readline())\n s = int(file.readline())\n with open(file_path, \"rb\") as file:\n file_hash = hashlib.sha256(file.read()).hexdigest()\n file_hash_int = int(file_hash, 16)\n \n if (r<0 or r>=Q) or (s<0 or s>=Q):\n raise Exception(ERRORS.INVALID_SIGNATURE)\n \n w = pow(s, Q-2, Q)\n u1 = (file_hash_int * w) % Q\n u2 = (r * w) % Q\n v = ((pow(G, u1, P) * pow(y, u2, P)) % P) % Q\n\n if v == r:\n print(f\"Signature is valid. The signature {signature_name}.sig verifies that {file_path} is sent by {user}.\")\n return\n \n print(f\"Signature is not valid.\")", "def signature(self, signature):\n\n self._signature = signature", "def signature(self, signature):\n\n self._signature = signature", "def verify_signature(signed_file_path, output_file_path):\n cmd = [\"gpg\", \"-d\"]\n keyring_path = configuration.get_gpg_public_keyring_path()\n\n # if a keyring is specified in the conf, used it, else use default one\n if keyring_path != \"\":\n cmd += [GPG_NO_DEFAULT_KEYRING_OPTION, GPG_KEYRING_ARG, keyring_path]\n cmd += [\"--output\", output_file_path, signed_file_path]\n\n # temporary workaround for the omi/gpg bug causing gpg to create a .gpg folder in the wrong home dir\n # only apply the workaround for oms installation\n env = None\n if \"nxOMSAutomationWorkerResource\" in os.path.abspath(__file__):\n env = os.environ.copy()\n env[\"HOME\"] = \"/var/opt/microsoft/omsagent/run\"\n\n proc = subprocessfactory.create_subprocess(cmd=cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n\n if proc.poll() == 0:\n tracer.log_debug_trace(\"Signature is valid.\")\n return True\n\n tracer.log_sandbox_job_runbook_signature_validation_failed(stderr)\n return False", "def get_signature_for_message(message, filename='private.key'):\n message = dict(sorted(message.items()))\n message = json.dumps(message)\n\n private_key_path = os.path.join('keys', filename)\n with open(private_key_path, 'rb') as file:\n private_key = RSA.importKey(file.read())\n\n h = SHA.new(message.encode()).digest()\n signature = private_key.sign(h, '')\n\n return base64.b64encode(bytes(str(signature[0]).encode()))", "def create_file(self, sensor_id:str, timestamp:str, 
sensor_name:str)->str:\n file_name = '%s/%s.%s.%s.json' % (self.generate_data_prep, sensor_id, timestamp, sensor_name)\n try: \n open(file_name, 'w').close()\n except Exception as e: \n print(\"Unable to create file (%s) - %s\" % (self.generate_data_prep, e))\n return False \n return file_name", "def _get_signature(value):\n mySha = hashlib.sha256()\n mySha.update(value)\n # print mySha.hexdigest()\n return mySha.hexdigest()", "def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in self ?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")", "def signatureScript(tx, idx, subscript, hashType, privKey, compress):\n\n sig = rawTxInSignature(tx, idx, subscript, hashType, privKey.key)\n\n pubKey = privKey.pub\n\n if compress:\n pkData = pubKey.serializeCompressed()\n else:\n pkData = pubKey.serializeUncompressed()\n\n script = addData(sig)\n script += addData(pkData)\n\n return script", "def test_sign_file_adhoc_identity(dummy_command, tmp_path):\n # Sign the file with an ad-hoc identity\n dummy_command.sign_file(tmp_path / \"base_path\" / \"random.file\", identity=\"-\")\n\n # An attempt to codesign was made without the runtime option\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(\n tmp_path,\n tmp_path / \"base_path\" / \"random.file\",\n identity=\"-\",\n entitlements=False,\n runtime=False,\n ),\n ],\n any_order=False,\n )", "def _create_unique_file(self):\n with open(self.uniquefile, 'w') as f:\n f.write(self._uniquename)\n self._uniquefile_created = True\n self._extend_expiration_time()\n self._p(\"Unique file created: %s\" % self.uniquefile)", "def sign_plaintext(client, request):\n return plaintext_signature(client.client_secret, client.token_secret)", "def to_file(self, data, file, pubkey_id):\n must_close = False\n if isinstance(file, str):\n try:\n file = open(file, \"wb\")\n except PermissionError as e:\n raise GPG.EncryptionException(str(e))\n\n result = subprocess.run(\n [GPG.bin, \"--encrypt\", \"-r\", pubkey_id],\n input=data,\n stdout=file,\n stderr=subprocess.PIPE\n )\n if must_close:\n file.close()\n if result.returncode == 0:\n # It was successful\n return\n else:\n raise GPG.EncryptionException(result.stderr)", "def daily_signature(key, message):\n byte_key = binascii.unhexlify(key)\n message = message.encode()\n return hmac.new(byte_key, message, hashlib.sha256).hexdigest().upper()", "def sign_command(args):\n if args.files:\n die(\"Unexpected positional arguments\")\n\n # Load certificate request\n if not args.request:\n die(\"Need --request\")\n subject_csr = load_req(args.request)\n\n reset_info = None\n if args.reset:\n reset_info = info_from_args(args)\n\n # Load CA info\n if not args.ca_info:\n die(\"Need --ca-info\")\n if args.ca_info.endswith('.csr'):\n issuer_obj = load_req(args.ca_info)\n else:\n issuer_obj = load_cert(args.ca_info)\n\n # Load CA private key\n issuer_key = load_key(args.ca_key, load_password(args.password_file))\n if not same_pubkey(issuer_key, issuer_obj):\n die(\"--ca-private-key does not match --ca-info data\")\n\n # Certificate generation\n cert = do_sign(subject_csr, issuer_obj, issuer_key, args.days, args.path_length, args.request, reset_info=reset_info)\n\n # Write certificate\n do_output(cert_to_pem(cert), args, 'x509')", "def 
json_dump_signature(packet_number, entity_name):\n directory = os.path.join(current_app.config['JSON_DUMPS_DIR'], \"json-dump-%s\" % packet_number)\n filename = '%s.tar.xz.asc' % entity_name\n if not os.path.isfile(safe_join(directory, filename)):\n return Response(\"Can't find signature for the specified JSON dump!\", status=404)\n return send_from_directory(directory, filename, mimetype=MIMETYPE_SIGNATURE)", "def handle_signature(self, sig, signode):\n raise NotImplementedError", "def create_id_nonce_signature(\n cls, *, signature_inputs: TSignatureInputs, private_key: bytes,\n ) -> bytes:\n ...", "def verify(cls, data, sig):\n data_file = tempfile.mktemp()\n with open(data_file, 'w+b') as fout:\n fout.write(data.read())\n verification = settings.GPG.verify_file(sig, data_file)\n os.unlink(data_file)\n return verification", "def Sign(self, data):\n return self.rsa_key.sign(data, padding.PKCS1v15(), utils.Prehashed(hashes.SHA1()))", "def save(self, save_dir):\n path = os.path.join(save_dir, self.name + \".pem\")\n with open(path, \"wb\") as f:\n f.write(self.public_key)", "def test_unique_signature_constraint(self):\n with self.assertRaisesRegex(ValidationError, 'Signature with this Agreement and Signatory already exists.'):\n new_test_sig = Signature(agreement=self.test_agreement,\n signatory=self.test_user,\n username=self.test_user.username,\n first_name=self.test_user.first_name,\n last_name=self.test_user.last_name,\n email=self.test_user.email,\n department=self.test_department)\n new_test_sig.full_clean()", "def sign(self):\n private_key = serialization.load_pem_private_key(\n binascii.unhexlify(self.sender_private_key.encode('utf8')),\n password=None,\n backend=default_backend()\n )\n signature = private_key.sign(\n str(self.to_dict()).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n return signature", "def make_dummy_file(path, contents=None):\n dirname = op.dirname(path)\n\n if not op.exists(dirname):\n os.makedirs(dirname)\n\n if contents is None:\n contents = '{}\\n'.format(op.basename(path))\n with open(path, 'wt') as f:\n f.write(contents)\n\n return hash(contents)", "def gpg_sign(target, source, env):\n\n # Print out.\n print('')\n print(\"############################################\")\n print(\"# GPG signing the binary distribution file #\")\n print(\"############################################\\n\\n\")\n\n # List of distribution files.\n type_list = [env['DIST_TYPE']]\n if type_list[0] == 'ALL':\n type_list = ['zip', 'tar']\n\n # GPG key.\n key = env['GPG_KEY']\n if key == None:\n sys.stderr.write(\"The GPG key needs to be supplied on the command line as key=xxxxx, where xxxxx is the name of your key.\\n\\n\")\n return\n\n # Loop over the distribution files.\n for dist_type in type_list:\n # The file name.\n if dist_type == 'zip':\n file = env['DIST_FILE'] + '.zip'\n elif dist_type == 'tar':\n file = env['DIST_FILE'] + '.tar.bz2'\n elif dist_type == 'dmg':\n file = env['DIST_FILE'] + '.dmg'\n\n # Print out.\n print(\"\\n\\nSigning the distribution package \" + repr(file) + \".\\n\")\n\n # Run the 'gpg' command.\n system(\"gpg --detach-sign --default-key \" + key + \" \" + path.pardir + path.sep + file)\n\n # Final printout.\n print(\"\\n\\n\\n\")" ]
[ "0.7170983", "0.6586112", "0.65388995", "0.6313484", "0.62940675", "0.62849694", "0.61516637", "0.61021864", "0.6018372", "0.59855306", "0.59176886", "0.5898463", "0.5864457", "0.58549124", "0.5852976", "0.5838192", "0.5816089", "0.58031625", "0.5802103", "0.5800313", "0.5731868", "0.5715472", "0.56711113", "0.5626532", "0.5615716", "0.55970865", "0.55852294", "0.5582744", "0.55672634", "0.5545396", "0.55422205", "0.5537409", "0.55278647", "0.5514203", "0.5502658", "0.54990745", "0.548228", "0.5469723", "0.5469549", "0.5468289", "0.5463339", "0.54604584", "0.545391", "0.54504347", "0.5439764", "0.5432306", "0.5431919", "0.54307836", "0.54300183", "0.541527", "0.5410703", "0.54070896", "0.539825", "0.53919154", "0.5358083", "0.53490746", "0.5347689", "0.5337329", "0.53193355", "0.53158295", "0.531544", "0.5309933", "0.530226", "0.5297009", "0.52958953", "0.52741545", "0.5263106", "0.5259518", "0.52575576", "0.5251283", "0.52415115", "0.5231058", "0.5224949", "0.52201605", "0.5218849", "0.5216441", "0.5214026", "0.5214026", "0.5211373", "0.5205738", "0.51933545", "0.5192465", "0.5182752", "0.5177504", "0.5163485", "0.5159772", "0.51589453", "0.51579106", "0.5154982", "0.5146897", "0.5144467", "0.5133136", "0.5131731", "0.512632", "0.51255804", "0.51254326", "0.51218456", "0.5110258", "0.5109075", "0.51086634" ]
0.62543976
6
Verification process of signature for file name document
def verification(file_name: str) -> None:
    print("Verification process...")
    file_name = os.path.join('data', file_name)
    file1 = open("data/key.txt", "r")
    file2 = open("data/signature.txt", "r")
    p = int(file1.readline().rstrip())
    q = int(file1.readline().rstrip())
    g = int(file1.readline().rstrip())
    h = int(file1.readline().rstrip())
    c1 = int(file2.readline().rstrip())
    c2 = int(file2.readline().rstrip())
    print('c1 = ', c1)
    print('c2 = ', c2)
    t1 = sha_hash(file_name)
    print('hash = ', t1)
    inverseC2 = compute_inverse(c2, q)
    t1 = (t1 * inverseC2) % q
    t2 = compute_inverse(c2, q)
    t2 = (t2 * c1) % q
    valid1 = square_multiply(g, t1, p)
    valid2 = square_multiply(h, t2, p)
    valid = ((valid1 * valid2) % p) % q
    if valid == c1:
        print("Valid signature")
    else:
        print("Invalid signature")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_signature_dialog():\n signature_name = input(\"Enter signature identity: \")\n file_path = input(\"Enter file path: \")\n user = input(\"Enter username: \")\n\n if not(os.path.exists(user)):\n raise Exception(ERRORS.NOT_FOUND_USER)\n if not(os.path.exists(f\"{signature_name}.sig\")):\n raise Exception(ERRORS.NOT_FOUND_SIGNATURE)\n if not(os.path.exists(file_path)):\n raise Exception(ERRORS.NOT_FOUND_FILE)\n\n with open(user, \"r\") as file:\n _ = int(file.readline())\n y = int(file.readline())\n with open(f\"{signature_name}.sig\", \"r\") as file:\n r = int(file.readline())\n s = int(file.readline())\n with open(file_path, \"rb\") as file:\n file_hash = hashlib.sha256(file.read()).hexdigest()\n file_hash_int = int(file_hash, 16)\n \n if (r<0 or r>=Q) or (s<0 or s>=Q):\n raise Exception(ERRORS.INVALID_SIGNATURE)\n \n w = pow(s, Q-2, Q)\n u1 = (file_hash_int * w) % Q\n u2 = (r * w) % Q\n v = ((pow(G, u1, P) * pow(y, u2, P)) % P) % Q\n\n if v == r:\n print(f\"Signature is valid. The signature {signature_name}.sig verifies that {file_path} is sent by {user}.\")\n return\n \n print(f\"Signature is not valid.\")", "def test_signature_verification(self):\n curdir = os.path.dirname(os.path.abspath(__file__))\n keydir = os.path.join(curdir, \"data\", \"ima_keys\")\n\n lines = SIGNATURES.split('\\n')\n\n # empty keyring\n keyring = ima_file_signatures.ImaKeyring()\n self.assertTrue(ima.process_measurement_list(lines, ima_keyring=keyring) is None)\n\n # add key for 1st entry; 1st entry must be verifiable\n rsakeyfile = os.path.join(keydir, \"rsa2048pub.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(rsakeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:1], ima_keyring=keyring) is not None)\n self.assertTrue(ima.process_measurement_list(lines[1:2], ima_keyring=keyring) is None)\n\n # add key for 2nd entry; 1st & 2nd entries must be verifiable\n eckeyfile = os.path.join(keydir, \"secp256k1.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(eckeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:2], ima_keyring=keyring) is not None)", "def check_sig(filename):\n pipe = Popen([\"gpg\", \"--verify\", filename], stderr=PIPE)\n pipe.stderr.read()\n status = pipe.wait()\n if status != 0:\n raise BadSignature('%s is not properly signed' % filename)", "def _sign_document(self):\n return False", "def verify_signature(self, inputs, signature):\n pass", "def verify_signature(self, inputs, signature):\n pass", "def verify_signature(signed_file_path, output_file_path):\n cmd = [\"gpg\", \"-d\"]\n keyring_path = configuration.get_gpg_public_keyring_path()\n\n # if a keyring is specified in the conf, used it, else use default one\n if keyring_path != \"\":\n cmd += [GPG_NO_DEFAULT_KEYRING_OPTION, GPG_KEYRING_ARG, keyring_path]\n cmd += [\"--output\", output_file_path, signed_file_path]\n\n # temporary workaround for the omi/gpg bug causing gpg to create a .gpg folder in the wrong home dir\n # only apply the workaround for oms installation\n env = None\n if \"nxOMSAutomationWorkerResource\" in os.path.abspath(__file__):\n env = os.environ.copy()\n env[\"HOME\"] = \"/var/opt/microsoft/omsagent/run\"\n\n proc = subprocessfactory.create_subprocess(cmd=cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n\n if proc.poll() == 0:\n tracer.log_debug_trace(\"Signature is valid.\")\n return True\n\n 
tracer.log_sandbox_job_runbook_signature_validation_failed(stderr)\n return False", "def check_sig(self):\n check_sig(self.path)\n dsc = self.get_dsc()\n if dsc is not None:\n check_sig(dsc)", "def verify_request_signature(req_info: StatusResponse) -> None:\n if not req_info.signature_check(req_info.xmlstr):\n raise ValueError(_(\"Message signature verification failure\"))", "def checksignature(self):\n if(self.name=='ORBIT'): return\n if(self.ctpnum==0): return\n cmd=\"CheckSignature(\"+self.board+\",\"+self.signature+\",\"+self.ctpnum+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n print \"input checksignature: \",output\n #self.signatureM=", "def verify(self):\n if not self.public_key:\n self.fetch_public_key()\n data = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}data\").text\n sig = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}sig\").text\n sig_contents = '.'.join([\n data,\n b64encode(b\"application/xml\").decode(\"ascii\"),\n b64encode(b\"base64url\").decode(\"ascii\"),\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n ])\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key))\n if not cipher.verify(sig_hash, urlsafe_b64decode(sig)):\n raise SignatureVerificationError(\"Signature cannot be verified using the given public key\")", "def test_signature_validation(self):\n signature = app.utils.generate_signed_data(\n self._body,\n settings.PRIVATE_KEY\n )\n\n self.assertTrue(app.utils.validate_signed_data(\n self._body,\n signature,\n settings.PUBLIC_KEY\n ))", "def verify_signature(self, key, data):\n verify_signature(self, key, data)", "def test_unexpected_error_in_signature(self):\n # TODO\n one_process_workflow = \"\"\"buggy://B <- file://A\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow, extra_resource=BuggySignatureResource)\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle while retrieving signature' \\\n ) >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in signature()\")') >= 0, process.error_message\n assert process.error_message.find('Process cannot be considered complete.') >= 0, process.error_message", "def check(self):\n if self.is_signed():\n data = self._document.read()\n hash_value = data[-self._append_size+1:-1]\n data = data[:-self._append_size]\n\n encrypted = self._encryptor.encrypt_cbc(data, self._init_vector)\n current_hash_value = encrypted[-16:]\n\n if current_hash_value != hash_value:\n print(\"Hash values did not matched!\")\n else:\n print(\"Hash values matched!\")\n else:\n print(\"The document is not signed!\")", "def is_signed(self):\n file_size = os.stat(self._file_name).st_size\n self._document.seek(file_size - self._append_size)\n last = self._document.read()\n self._document.seek(0)\n\n if not (chr(last[0]) == self._seperator and chr(last[-1]) == self._seperator):\n return False\n else:\n return True", "def test_create_image_signature(self):\n pass", "def verify():", "def check_fileName(session) -> 'bool':\n c = get_client()\n cursor = c.find({},{\"size\":1, \"_id\":0})\n print(session)\n for document in cursor:\n print(document)\n if hmac.compare_digest(session, document[\"size\"]):\n return True\n print(\"size \", document[\"size\"])\n return False", "def 
verify_apk_signature(self):\n verify.verify_apk_sig(self.apk_path) # raises CryptoVerificationError\n print(' - APK signature is valid')", "def verify(self, signature, body, external_aad, public_key):", "def verify(key, file, sign):\n\n try:\n key = TomlKeyFormatter().from_string(key.read())\n signature = TomlSignatureFormatter().from_string(sign.read())\n\n if signature.verify(SignableBinaryIO(file), key):\n click.echo(\"---verified---\")\n exit(0)\n else:\n click.echo(\"---denied---\")\n exit(1)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except SignatureFormatError:\n click.echo(\"ERROR: Signature is in bad format\")", "def identify_file(self, file):", "def signature(request) -> str:\n return get_test_data(request, __name__, \"signature\", \"r\")", "def test_getSignature(self):\n self.assertTrue(ChangeType().getSignature(0) is not '')", "def verify_signature(\n self, path: Union[bytes, str], digest: bytes, signature: bytes\n ):\n path = _to_bytes_or_null(path)\n ret = lib.Fapi_VerifySignature(\n self._ctx, path, digest, len(digest), signature, len(signature)\n )\n _chkrc(ret)", "def _get_signature(search_results: SearchResults) -> Text:\n # Was previously logic here. Leaving method in case it's needed again\n return COMMENT_SIGNATURE", "def _validate_signature(self):\n signing_string = '{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n'.format(\n 'Message',\n self._message_encoded,\n 'MessageId',\n self._message_id,\n 'Timestamp',\n self._timestamp,\n 'TopicArn',\n self._topic_arn,\n 'Type',\n self._type)\n\n crt = crypto.load_certificate(crypto.FILETYPE_PEM, self._pem)\n signature = base64.b64decode(self._signature)\n\n try:\n crypto.verify(\n crt,\n signature,\n signing_string.encode('utf-8'),\n 'sha1')\n except:\n self.error = 'Invalid signature.'\n raise ValueError('Invalid signature.')\n\n return True", "def remove_sign(self):\n if self.is_signed():\n file_size = os.stat(self._file_name).st_size\n self._document.truncate(file_size - self._append_size)\n print(\"Sign removed from the document!\")\n else:\n print(\"The document is not signed!\")", "def check_specific_signatures(self):\r\n\r\n test1 = re.search(r'История операций по дебетовой карте за период', self.bank_text, re.IGNORECASE)\r\n # print(f\"{test1=}\")\r\n\r\n if not test1:\r\n raise exceptions.InputFileStructureError(\"Не найдены паттерны, соответствующие выписке\")", "def test_unique_signature_constraint(self):\n with self.assertRaisesRegex(ValidationError, 'Signature with this Agreement and Signatory already exists.'):\n new_test_sig = Signature(agreement=self.test_agreement,\n signatory=self.test_user,\n username=self.test_user.username,\n first_name=self.test_user.first_name,\n last_name=self.test_user.last_name,\n email=self.test_user.email,\n department=self.test_department)\n new_test_sig.full_clean()", "def verify (self, path):\n pass", "def verify(cls, data, sig):\n data_file = tempfile.mktemp()\n with open(data_file, 'w+b') as fout:\n fout.write(data.read())\n verification = settings.GPG.verify_file(sig, data_file)\n os.unlink(data_file)\n return verification", "def verify_file_signature(manifest_file, detached_signature, keyring, ignore_signature_errors):\n # type: (str, str, str, list[str]) -> None\n gpg_result, gpg_verification_rc = run_gpg_verify(manifest_file, detached_signature, keyring, display)\n\n if gpg_result:\n errors = parse_gpg_errors(gpg_result)\n try:\n error = next(errors)\n except StopIteration:\n pass\n else:\n reasons = []\n ignored_reasons = 0\n\n for 
error in chain([error], errors):\n # Get error status (dict key) from the class (dict value)\n status_code = list(GPG_ERROR_MAP.keys())[list(GPG_ERROR_MAP.values()).index(error.__class__)]\n if status_code in ignore_signature_errors:\n ignored_reasons += 1\n reasons.append(error.get_gpg_error_description())\n\n ignore = len(reasons) == ignored_reasons\n raise CollectionSignatureError(reasons=set(reasons), stdout=gpg_result, rc=gpg_verification_rc, ignore=ignore)\n\n if gpg_verification_rc:\n raise CollectionSignatureError(stdout=gpg_result, rc=gpg_verification_rc)\n\n # No errors and rc is 0, verify was successful\n return None", "def identify(source_path):\r\n binary_string = get(source_path)\r\n\r\n print(source_path + \" include following FILE SIGNATURES\")\r\n\r\n footer_result = get_signature_index(binary_string, footers)\r\n header_result = get_signature_index(binary_string, headers)\r\n\r\n if check_hidden_data(binary_string, header_result, footer_result):\r\n print('This file include hidden file.')\r\n\r\n for file_type, header_indexies in header_result.items():\r\n print('File type: '+file_type+' Detect: '+str(len(header_indexies))+' files')\r\n print('HEADER')\r\n result = ''\r\n for i, location in enumerate(header_indexies):\r\n if i == (len(header_indexies)-1):\r\n result += str(location[0]//2) + ' bytes - ' + str((location[1]-1)//2) + ' bytes'\r\n else:\r\n result += str(location[0]//2) + ' bytes - ' + str((location[1]-1)//2) + ' bytes, '\r\n print(result)\r\n\r\n print('FOOTER')\r\n result = ''\r\n if file_type in footer_result:\r\n footer_indexies = footer_result[file_type]\r\n for i, location in enumerate(footer_indexies):\r\n if i == (len(footer_indexies)-1):\r\n result += str(location[0]//2) + ' bytes - ' + str((location[1]-1)//2) + ' bytes'\r\n else:\r\n result += str(location[0]//2) + ' bytes - ' + str((location[1]-1)//2) + ' bytes, '\r\n print(result+'\\n')", "def sign(filename: str) -> str:\n fs, relative_path = url_to_fs(filename)\n try:\n return cast(str, fs.sign(relative_path))\n except NotImplementedError:\n return filename", "def verify(self):\n token = \"mytoken\" # set from wx server\n ll = []\n signature = self.get_argument(\"signature\", \"<none>\")\n ll.append(self.get_argument(\"timestamp\", \"<none>\"))\n ll.append(self.get_argument(\"nonce\", \"<none>\"))\n ll.append(token)\n ll.sort()\n m = hashlib.sha1()\n m.update(\"\".join(ll).encode(\"ascii\"))\n digest = m.hexdigest()\n\n if signature != digest:\n print(\"signature not match, discard this msg!\")\n return False\n else:\n print(\"signature match, got a wechat msg!\")\n return True", "def test_failed_signature(self):\n signature = app.utils.generate_signed_data(\n self._body,\n settings.PRIVATE_KEY\n )\n\n self.assertFalse(\n app.utils.validate_signed_data(\n {\n 'productCode': 'product-1'\n },\n signature,\n settings.PUBLIC_KEY\n )\n )", "def test_signature(self):\n with open(\"{}/{}\".format(self.APP_PATH, self.TARGET_PY_FILE),\n 'r', encoding=\"utf-8\", errors='ignore') as f:\n read_data = f.read()\n # Check [def predict()] section\n with self.subTest(name=\"[def handle()] in main.py\"):\n self.assertIsNotNone(\n re.search(r'def\\s+handle\\(\\w+\\)', read_data),\n msg=\"[def handle()] signature is missing or incorrect\")", "def _verify(pubkey: SupportedKeyTypes, sig: bytes, filehash: bytes, hashfunc: hashes.HashAlgorithm) -> None:\n if isinstance(pubkey, RSAPublicKey):\n pubkey.verify(sig, filehash, padding.PKCS1v15(), Prehashed(hashfunc))\n elif isinstance(pubkey, EllipticCurvePublicKey):\n 
pubkey.verify(sig, filehash, ec.ECDSA(Prehashed(hashfunc)))", "def is_signed_by_primary(blob, key_name, sig):\n # Assert that running on Replica.\n state = model.get_replication_state()\n assert state and state.primary_url, state\n # Grab the cert from primary and verify the signature. We are signing SHA512\n # hashes, since AuthDB blob is too large.\n certs = signature.get_service_public_certificates(state.primary_url)\n digest = hashlib.sha512(blob).digest()\n return certs.check_signature(digest, key_name, sig)", "def isSigned(file):\n\n if '|9999|' in file[-1]:\n return True\n else:\n return False", "def test_sign_file_unsupported_format(dummy_command, tmp_path, capsys):\n # FIXME: I'm not sure how to manufacture this in practice.\n dummy_command.tools.subprocess.run.side_effect = mock_codesign(\n \"unsupported format for signature\"\n )\n\n # Sign the file\n dummy_command.sign_file(\n tmp_path / \"base_path\" / \"random.file\",\n identity=\"Sekrit identity (DEADBEEF)\",\n )\n\n # An attempt to codesign was made\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(\n tmp_path,\n tmp_path / \"base_path\" / \"random.file\",\n entitlements=False,\n ),\n ],\n any_order=False,\n )\n\n # The console includes a warning about not needing a signature.\n assert \"... no signature required\\n\" in capsys.readouterr().out", "def sign_file_dialog():\n if X is None or Y is None:\n raise Exception(ERRORS.INVALID_AUTH)\n\n file_path = input(\"Enter file path: \")\n signature_name = input(\"Enter signature identity: \")\n\n for c in signature_name:\n ascii_c = ord(c)\n if not ((ascii_c >= 65 and ascii_c <= 90) or (ascii_c >= 97 and ascii_c <= 122) or (ascii_c >= 48 and ascii_c <= 57) or ascii_c == 95):\n raise Exception(ERRORS.INVALID_SIGNATURE_NAME)\n\n if not os.path.exists(file_path):\n raise Exception(ERRORS.INVALID_FILE)\n \n with open(file_path, \"rb\") as file:\n file_hash = hashlib.sha256(file.read()).hexdigest()\n file_hash_int = int(file_hash, 16)\n \n k = random.randint(1, Q-1)\n r = 0\n while r==0:\n r = int(pow(G, k, P)) % Q\n s = (pow(k,Q-2,Q)*(file_hash_int + X%Q*r%Q)%Q) % Q\n\n with open(f\"{signature_name}.sig\", \"w\") as file:\n file.write(str(r) + \"\\n\")\n file.write(str(s) + \"\\n\")\n \n print(f\"Signature {signature_name}.sig created successfully.\")", "def checksignature(self,activeinputs):\n for i in ['1','2','3']:\n inps=''\n start=1\n for j in activeinputs[i]:\n inps=inps+(int(j[0])-start)*'0'+'1'\n start=int(j[0])+1\n print 'checksignature inps= ',inps\n if inps != '': \n cmd=\"FindSignatures(\"+i+\",\"+'\"'+inps+'\"'+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n print i,'output=',output,len(output)\n for j in range(0,len(output)-1,2):\n k=self.findinput(output[j],i)\n print k,' k j ',j\n if k != None:\n print 'checksignature: ',j,output[j],k,self.inputs[k].name\n self.inputs[k].signatureM=output[j+1]", "def test_check_canonical_filenames(self):\n contents = self.read_metadata_contents()\n family_metadata = Metadata.get_family_metadata(contents)\n for font_metadata in family_metadata.fonts:\n canonical_filename = self.create_canonical_filename(font_metadata)\n if canonical_filename != font_metadata.filename:\n self.fail('{} != {}'.format(canonical_filename,\n font_metadata.filename))", "def get_signature(self, signature_name=None):\n return None, None", "def verify_file(data_file, cert_file=None, signature_file=None, trust_dir=None):\n # Sanitize before appending signature extension\n data_file = os.path.realpath(data_file)\n\n if not 
signature_file:\n signature_file = data_file + '.' + EXT_SIGN\n if not cert_file:\n cert_file = data_file + '.' + EXT_CERT\n if not os.path.exists(signature_file) or not os.path.exists(cert_file):\n return SIGN_NO\n\n # Verify certificate\n cert_validity = verify_certificate(cert_file)\n if cert_validity == CERT_CORRUPTED:\n return SIGN_CORRUPTED\n\n # Check trustworthiness of certificate\n if trust_dir != None and check_trust(cert_file, trust_dir) == CERT_UNTRUSTED:\n return SIGN_UNTRUSTED\n\n # Keep public key in a temporary file\n pub_file = tempfile.NamedTemporaryFile()\n pub_file.write(get_public_key(cert_file))\n pub_file.flush()\n\n # Use OpenSSL to verify signature\n command = '/usr/bin/openssl dgst -sha1 -verify %s -signature %s %s'\n command = command % (pub_file.name, signature_file, data_file)\n command = shlex.split(command)\n\n pipe = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n result = pipe.wait()\n\n # Destroy temporary files\n pub_file.close()\n\n if result == 0:\n if cert_validity == CERT_OK:\n return SIGN_OK\n else:\n return SIGN_SELF\n else:\n return SIGN_CORRUPTED", "def check_signature(signature, *args, **kwargs):\n return hmac.compare_digest(signature, create_signature(*args, **kwargs))", "def test_delete_image_signature(self):\n pass", "def test_invalid_signature(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n # validate signature is OK with the original key\n self.assertTrue(validate_signatures(bundle))\n key = bundle.keys.pop()\n _pk = base64.b64decode(key.public_key)\n # change the last byte of the public key\n _pk = _pk[:-1] + bytes([_pk[-1] + 1])\n new_key = Key(\n algorithm=key.algorithm,\n flags=key.flags,\n key_identifier=key.key_identifier,\n key_tag=key.key_tag,\n protocol=key.protocol,\n public_key=base64.b64encode(_pk),\n ttl=key.ttl,\n )\n bundle.keys.add(new_key)\n # test that the signature no longer validates\n with self.assertRaises(InvalidSignature):\n validate_signatures(bundle)", "def check_cmd_signature(cmd_type, filename):\n log.debug('Checking command file %s (%s) signature', filename, cmd_type)\n\n # detect trust root file\n trust_container_filepath = os.path.join(CONFIG['active_config_files_path'],\n 'trust.bdoc')\n if cmd_type == 'trust' and not os.path.exists(trust_container_filepath):\n trust_container_filepath = filename\n\n try:\n open(trust_container_filepath)\n except OSError as err:\n err.strerror = \"Trust root not found: %s\" % trust_container_filepath\n raise err\n\n # execute verifier command\n cmd = ['ivxv-container-verifier', '-trust', trust_container_filepath,\n filename]\n proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True)\n\n if proc.returncode:\n verifier_errors = {\n 64: 'Command was used incorrectly',\n 65: 'Failed to open container',\n 66: 'Input file did not exist or was not readable',\n 74: 'Failed read trust root',\n }\n err_msg = verifier_errors.get(proc.returncode, 'Unhandled error')\n raise subprocess.SubprocessError(': '.join([err_msg, proc.stderr]))\n\n # parse command output and create signatures list\n all_signatures = []\n for line in proc.stdout.strip().split('\\n'):\n if not re.match(r'.+,.+,[0-9]{11} ', line):\n raise LookupError('Invalid signature line: %s' % line)\n signer, timestamp_str = line.split(' ')\n timestamp = datetime.datetime.strptime(\n timestamp_str, RFC3339_DATE_FORMAT_WO_FRACT).timestamp()\n all_signatures.append([timestamp, signer, 
line])\n all_signatures = sorted(all_signatures)\n\n # check signers authorization for trust root config\n if cmd_type == 'trust':\n log.debug('Check signers authorization against trust root config')\n config = load_collector_command_file(cmd_type, filename)\n trusted_signers = config.get('authorizations', [])\n authorized_signatures = [\n [signature, 'admin']\n for timestamp, signer, signature in all_signatures\n if signer in trusted_signers]\n return authorized_signatures, all_signatures\n\n # detect permission for command type\n if cmd_type == 'technical':\n permission = PERMISSION_TECH_CONF\n elif cmd_type in CONFIG_TYPES or cmd_type in VOTING_LIST_TYPES:\n permission = PERMISSION_ELECTION_CONF\n else:\n assert cmd_type == 'user'\n permission = PERMISSION_USERS_ADMIN\n\n # check signers authorization for other config files\n log.debug(\n 'Check signers authorization against collector management database')\n authorized_signatures = []\n db = IVXVManagerDb()\n for timestamp, signer, signature in all_signatures:\n try:\n roles = db.get_value('user/{}'.format(signer))\n except KeyError:\n log.debug('No database record for signer %s', signer)\n continue\n authorized_signatures += [[signature, role]\n for role in roles.split(',')\n if permission in ROLES[role]['permissions']]\n db.close()\n\n return authorized_signatures, all_signatures", "def check(self, request, consumer, token, signature):\r\n built = self.sign(request, consumer, token)\r\n return built == signature", "def is_signature_valid(self, data, sig):\n if self.verified == False:\n return False\n\n key = self.publickey_set.filter(\n fingerprint=PublicKey.verify(data, sig).fingerprint,\n ).first()\n return key", "def signature_length(self):", "def verify_signature(signature_object, pubkey_info, content):\n if not CRYPTO: # pragma: no cover\n raise exceptions.UnsupportedLibraryError(NO_CRYPTO_MSG)\n\n formats.GPG_PUBKEY_SCHEMA.check_match(pubkey_info)\n formats.GPG_SIGNATURE_SCHEMA.check_match(signature_object)\n\n handler = SIGNATURE_HANDLERS[pubkey_info[\"type\"]]\n sig_keyid = signature_object[\"keyid\"]\n\n verification_key = pubkey_info\n\n # If the keyid on the signature matches a subkey of the passed key,\n # we use that subkey for verification instead of the master key.\n if sig_keyid in list(pubkey_info.get(\"subkeys\", {}).keys()):\n verification_key = pubkey_info[\"subkeys\"][sig_keyid]\n\n creation_time = verification_key.get(\"creation_time\")\n validity_period = verification_key.get(\"validity_period\")\n\n if (\n creation_time\n and validity_period\n and creation_time + validity_period < time.time()\n ):\n raise KeyExpirationError(verification_key)\n\n return handler.verify_signature(\n signature_object, verification_key, content, SHA256\n )", "def check_signature(signature, key, data):\n if isinstance(key, type(u'')):\n key = key.encode()\n \n digest = 'sha1=' + hmac.new(key, data, hashlib.sha1).hexdigest()\n \n # Covert everything to byte sequences\n if isinstance(digest, type(u'')):\n digest = digest.encode()\n if isinstance(signature, type(u'')):\n signature = signature.encode()\n \n return werkzeug.security.safe_str_cmp(digest, signature)", "def verify_signature(self):\n if self.get_contact_key:\n sender_key = self.get_contact_key(self.sender_handle)\n else:\n sender_key = fetch_public_key(self.sender_handle)\n if not sender_key:\n raise NoSenderKeyFoundError(\"Could not find a sender contact to retrieve key\")\n MagicEnvelope(doc=self.doc, public_key=sender_key, verify=True)", "def check_signature(blob, 
x509_certificate_pem, signature):\n # See http://stackoverflow.com/a/12921889.\n\n # Lazy import Crypto, since not all service that use 'components' may need it.\n from Crypto.Hash import SHA256\n from Crypto.PublicKey import RSA\n from Crypto.Signature import PKCS1_v1_5\n from Crypto.Util import asn1\n\n # Convert PEM to DER. There's a function for this in 'ssl' module\n # (ssl.PEM_cert_to_DER_cert), but 'ssl' is not importable in GAE sandbox\n # on dev server (C extension is not whitelisted).\n lines = x509_certificate_pem.strip().split('\\n')\n if (len(lines) < 3 or\n lines[0] != '-----BEGIN CERTIFICATE-----' or\n lines[-1] != '-----END CERTIFICATE-----'):\n raise CertificateError('Invalid certificate format')\n der = base64.b64decode(''.join(lines[1:-1]))\n\n # Extract subjectPublicKeyInfo field from X.509 certificate (see RFC3280).\n cert = asn1.DerSequence()\n cert.decode(der)\n tbsCertificate = asn1.DerSequence()\n tbsCertificate.decode(cert[0])\n subjectPublicKeyInfo = tbsCertificate[6]\n\n verifier = PKCS1_v1_5.new(RSA.importKey(subjectPublicKeyInfo))\n return verifier.verify(SHA256.new(blob), signature)", "def verify_apk_signature_fprs(self):\n if not self.apk_signature_fingerprints:\n print(' - No signature fingerprint was given')\n return\n\n real_fprs = verify.get_apk_sig_fpr(self.apk_path)\n for method, get_fpr in self.apk_signature_fingerprints:\n fpr = utils.clean_hexdigitstr(self._get_fpr(get_fpr))\n if real_fprs[method]:\n if fpr != real_fprs[method]:\n raise verify.CryptoVerificationError(\n file_name=self.apk_path,\n message='{0} fingerprint did not match. '\n 'Expected {1} but was {2}'.format(\n method, fpr, real_fprs[method]\n )\n )\n else:\n print(' - %s signature fingerprint matches' % method)", "def integrity_digsig_verify(self, signature: bytes, filehash: bytes, filehash_type: str) -> bool:\n fmt = \">BB\"\n if len(signature) < struct.calcsize(fmt):\n logger.warning(\"Malformed signature: not enough bytes\")\n return False\n\n typ, version = struct.unpack(fmt, signature[: struct.calcsize(fmt)])\n if typ not in [EvmImaXattrType.EVM_IMA_XATTR_DIGSIG, EvmImaXattrType.EVM_XATTR_PORTABLE_DIGSIG]:\n logger.warning(\"Malformed signature: wrong type\")\n return False\n\n if version == 2:\n return self._asymmetric_verify(signature, filehash, filehash_type)\n\n logger.warning(\"Malformed signature: wrong version (%d)\", version)\n return False", "def verify_data(data, signature_data, trust_dir):\n # Check header\n if not len(signature_data) or not signature_data.startswith(HEADER):\n return SIGN_NO\n else:\n try:\n header, cert_ascii, signature_ascii = signature_data.split(':')\n except ValueError:\n return SIGN_CORRUPTED\n if header != HEADER:\n return SIGN_CORRUPTED\n signature_binary = base64.b64decode(signature_ascii)\n cert_data = base64.b64decode(cert_ascii)\n\n # Keep certificate in a temporary file\n cert_file = tempfile.NamedTemporaryFile()\n cert_file.write(cert_data)\n cert_file.flush()\n\n # Keep signature in a temporary file\n signature_file = tempfile.NamedTemporaryFile()\n signature_file.write(signature_binary)\n signature_file.flush()\n\n # Keep data in a temporary file\n data_file = tempfile.NamedTemporaryFile()\n data_file.write(data)\n data_file.flush()\n\n # Verify\n result = verify_file(data_file.name, cert_file.name, signature_file.name, trust_dir)\n\n # Destroy temporary files\n cert_file.close()\n signature_file.close()\n data_file.close()\n\n return result", "def check_load(log, signatures):\n\n if token_dir is None:\n return True\n\n if not 
signing_keys:\n return True\n\n # The web sandbox should be enough.\n if renpy.emscripten:\n return True\n\n if verify_data(log, signatures):\n return True\n\n def ask(prompt):\n \"\"\"\n Asks the user a yes/no question. Returns True if the user says yes,\n and false otherwise.\n \"\"\"\n\n return renpy.exports.invoke_in_new_context(renpy.store.layout.yesno_prompt, None, prompt)\n\n if not ask(renpy.store.gui.UNKNOWN_TOKEN):\n return False\n\n new_keys = [ i for i in get_keys_from_signatures(signatures) if i not in verifying_keys ]\n\n if new_keys and ask(renpy.store.gui.TRUST_TOKEN):\n\n keys_text = os.path.join(token_dir, \"security_keys.txt\")\n\n with open(keys_text, \"a\") as f:\n for k in new_keys:\n f.write(encode_line(\"verifying-key\", k))\n verifying_keys.append(k)\n\n if not signatures:\n return True\n\n # This check catches the case where the signature is not correct.\n return verify_data(log, signatures, False)", "def verify(filename):\n verified = True\n errs = []\n\n if not os.path.exists(filename):\n verified = False\n errs.append('No such file: %s' % filename)\n \n errs = ['[!] ' + e for e in errs]\n\n if len(errs) > 0:\n print('\\n'.join(errs))\n\n return verified", "def compare_signature(public_key: str, signature: str, content: dict) -> bool:\n\n public_key = import_key(public_key)\n verifier = PKCS1_v1_5.new(public_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n\n return verifier.verify(h, binascii.unhexlify(signature))", "def test_pub(self):\n mock_fieldfile = Mock()\n mock_fieldfile.name = 'good_file_name.pem'\n try:\n key_file_validator(mock_fieldfile)\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')", "def compare_signatures(self):\n my_decoded_sig = self._generate_signature().decode('utf-8')\n github_decoded_sig = self.github_signature.decode('utf-8')\n self.logger.info(f'*** INFO: My Signature: {my_decoded_sig}')\n self.logger.info(f'*** INFO: GitHub Signature: {github_decoded_sig}')\n return hmac.compare_digest(my_decoded_sig, github_decoded_sig)", "def test_sign_file_unknown_bundle_format(dummy_command, tmp_path, capsys):\n # Raise an error caused by an unknown bundle format during codesign\n dummy_command.tools.subprocess.run.side_effect = mock_codesign(\n \"bundle format unrecognized, invalid, or unsuitable\"\n )\n\n # Sign the file\n dummy_command.sign_file(\n tmp_path / \"base_path\" / \"random.file\",\n identity=\"Sekrit identity (DEADBEEF)\",\n )\n\n # An attempt to codesign was made\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(\n tmp_path,\n tmp_path / \"base_path\" / \"random.file\",\n entitlements=False,\n ),\n ],\n any_order=False,\n )\n\n # The console includes a warning about not needing a signature.\n assert \"... 
no signature required\\n\" in capsys.readouterr().out", "def is_signature_valid(self, data, sig):\n fingerprint = self.verify(data, sig).fingerprint\n if fingerprint == self.fingerprint:\n return self\n return None", "def verify_signatures(params, signed_fields_key='signedFields', full_sig_key='signedDataPublicSignature'):\r\n signed_fields = params.get(signed_fields_key, '').split(',')\r\n data = u\",\".join([u\"{0}={1}\".format(k, params.get(k, '')) for k in signed_fields])\r\n signed_fields_sig = processor_hash(params.get(signed_fields_key, ''))\r\n data += u\",signedFieldsPublicSignature=\" + signed_fields_sig\r\n returned_sig = params.get(full_sig_key, '')\r\n if processor_hash(data) != returned_sig:\r\n raise CCProcessorSignatureException()", "def _is_signature_valid(post_params):\r\n\r\n # Calculate the fields signature\r\n fields_sig = processor_hash(post_params.get('orderPage_signedFields'))\r\n\r\n # Retrieve the list of signed fields\r\n signed_fields = post_params.get('orderPage_signedFields').split(',')\r\n\r\n # Calculate the public signature\r\n hash_val = \",\".join([\r\n \"{0}={1}\".format(key, post_params[key])\r\n for key in signed_fields\r\n ]) + \",signedFieldsPublicSignature={0}\".format(fields_sig)\r\n\r\n public_sig = processor_hash(hash_val)\r\n\r\n return public_sig == post_params.get('orderPage_signaturePublic')", "def test_validate_ksk_proof_of_ownership_1(self):\n self._test_file(\"ksr-root-2009-q4-2.xml\")", "def testSignature(self):\n raw_data = copy.deepcopy(TEST_DATA)\n # Preserve only the first stack.\n raw_data['subtree_stacks'] = raw_data['subtree_stacks'][0:1]\n root_frame = (\n raw_data['subtree_stacks'][0]['frames'][raw_data['subtree_root_depth']])\n\n # Check that the function gets truncated to the max length.\n root_frame['function_name'] = 'x' * (SIGNATURE_MAX_LENGTH + 1)\n uma_data = UMASamplingProfilerData(\n raw_data, ChromeDependencyFetcher(self.GetMockRepoFactory()))\n self.assertEqual(uma_data.signature, 'x' * SIGNATURE_MAX_LENGTH)\n\n # Check that unsymbolized functions are properly handled.\n del root_frame['function_name']\n uma_data = UMASamplingProfilerData(\n raw_data, ChromeDependencyFetcher(self.GetMockRepoFactory()))\n self.assertEqual(uma_data.signature, 'unsymbolized function')", "def test_validate_ksk_proof_of_ownership_2(self):\n self._test_file(\"ksr-root-2010-q1-0.xml\")", "def test_too_short_signature_validity(self):\n bundle1, bundle2, = self._get_two_bundles(\n bundle1_inception=\"2019-01-01T00:00:00\",\n bundle1_expiration=\"2019-01-22T00:00:00\",\n bundle2_inception=\"2019-01-02T00:00:00\",\n bundle2_expiration=\"2019-01-10T00:00:00\",\n )\n xml = self._make_request(bundle1=bundle1, bundle2=bundle2)\n request = request_from_xml(xml)\n policy = replace(\n self.policy, check_bundle_intervals=False, check_cycle_length=False,\n )\n with self.assertRaises(KSR_POLICY_SIG_VALIDITY_Violation) as exc:\n validate_request(request, policy)\n self.assertEqual(\n \"Bundle validity 8 days < claimed min_signature_validity 21 days (in bundle test-2)\",\n str(exc.exception),\n )", "def validate_signature(self):\n return self.signature == 0xAA55", "def check_signature(signature, data):\n if SIGNATURE_DISABLED:\n return True\n\n # check signature\n try:\n digest = hmac.new(\n SEGMENT_SHARED_SECRET.encode(), msg=data, digestmod=hashlib.sha1\n ).hexdigest()\n if digest == signature:\n return True\n else:\n print(f\"Invalid signature. 
Expected {digest} but got {signature}\")\n except KeyError:\n pass\n\n return False", "def test_signature_without_key(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n _sig = list(bundle.signatures)[0]\n new_sig = Signature(\n key_identifier=\"test id\",\n ttl=_sig.ttl,\n type_covered=_sig.type_covered,\n algorithm=_sig.algorithm,\n labels=_sig.labels,\n original_ttl=_sig.original_ttl,\n signature_expiration=_sig.signature_expiration,\n signature_inception=_sig.signature_inception,\n key_tag=1234,\n signers_name=_sig.signers_name,\n signature_data=_sig.signature_data,\n )\n bundle.signatures.add(new_sig)\n # test that the signature no longer validates\n with self.assertRaises(ValueError):\n validate_signatures(bundle)", "def VerifyWithFile(publicKey: str, value: ElementTree.Element) -> bool:\r\n lr = LicenceReader()\r\n lr.m_Licence1 = value\r\n return lr.Verify(publicKey)", "def verify_signature(self, payload, signature, timestamp, public_key=None):\n timestamped_payload = timestamp + payload\n decoded_signature = Signature.fromBase64(signature)\n\n key = public_key or self.public_key\n return Ecdsa.verify(timestamped_payload, decoded_signature, key)", "def verify_filename(filename):\n\n if is_fileobj(filename):\n raise ValueError(\"%r not a filename\" % filename)", "def test_sign_then_verify_unicode(self):\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': u'\\u2699',\r\n 'billTo_lastName': u\"\\u2603\",\r\n 'orderNumber': '1',\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n 'ccAuthReply_amount': '0.00'\r\n }\r\n\r\n verify_signatures(sign(params), signed_fields_key='orderPage_signedFields',\r\n full_sig_key='orderPage_signaturePublic')\r\n # if the above verify_signature fails it will throw an exception, so basically we're just\r\n # testing for the absence of that exception. 
the trivial assert below does that\r\n self.assertEqual(1, 1)", "def validate_signature(self, params):\n if \"signature\" not in params:\n raise SignatureValidationError(\"Parameters did not include a signature\")\n\n signature = params[\"signature\"]\n\n keys = params.keys()\n keys.sort()\n query_string = \"&\".join(quote(key, \"~\") + \"=\" + quote(params[key], \"~\") \\\n for key in keys if key != \"signature\")\n computed_hash = base64.b64encode(hmac.new(self.app_secret, query_string, hashlib.sha256)\n .digest())\n\n if computed_hash != signature:\n raise SignatureValidationError(\"Invalid signature: \" + query_string)\n\n issued_at = iso8601.parse_date(params[\"issuedAt\"])\n expires_at = issued_at + timedelta(minutes=SIGNATURE_WINDOW_SIZE)\n if datetime.utcnow() > expires_at.replace(tzinfo=None):\n raise SignatureValidationError(\"Expired signature\")", "def test_validate_ksk_proof_of_ownership_4(self):\n self._test_file(\"ksr-root-2016-q3-0.xml\")", "def filename(self):\n return valid_filename(\n \"%s%s by %s.epub\" %\n (self.prefix, self.title, self.author))", "def _verify_file(self, filename, suffix=None, error_tag=None, silent=False):\n if error_tag is None:\n error_tag = 'File'\n if (filename is None) or (not os.path.isfile(filename)):\n if suffix is None: # pragma: no cover\n raise IOError(f\"{error_tag} dosn't exist: {filename}\")\n pattern = os.path.join(os.path.dirname(self.filename), '*' + suffix)\n files = glob.glob(pattern)\n if len(files) == 0: # pragma: no cover\n raise IOError(f\"{error_tag} could not be located matching: {pattern}\")\n else:\n filename = files[0]\n if not silent:\n print(f\"Using {error_tag.lower()} found at {filename}\")\n return filename", "def test_validate_ksk_proof_of_ownership_3(self):\n self._test_file(\"ksr-root-2010-q2-0.xml\")", "def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)", "def verifySignature(self, message: bytes, signature: bytes, sigAlgo: SignatureAlgorithm) -> bool:\n\n # Convert parent type algos.SignedDigestAlgorithm to SignatureAlgorithm\n if not isinstance(sigAlgo, SignatureAlgorithm):\n sigAlgo.__class__ = SignatureAlgorithm\n\n # Convert plain ECDSA sig to x9.62 format\n if sigAlgo.isPlain:\n signature = ECDSA_X962_Signature.fromPlain(signature).dump()\n\n hash_algo = algo_utils.get_hash_algo_by_name(sigAlgo.hashAlgo)\n\n class Verifier:\n def __init__(self, vf):\n self._vf = vf\n def verify(self):\n return self._vf()\n\n def get_rsa_verifier(pub_key: rsa.RSAPublicKey):\n if sigAlgo.signature_algo == 'rsassa_pss':\n sig_algo_params = sigAlgo['parameters']\n assert 'mask_gen_algorithm' in sig_algo_params\n assert 'salt_length' in sig_algo_params\n\n mgf = sig_algo_params['mask_gen_algorithm']['algorithm'].native\n if 'mgf1' != mgf:\n raise ValueError(\"Invalid mask generation algorithm: {}\".format(mgf))\n\n mgf1_hash_algo = sig_algo_params['mask_gen_algorithm']['parameters']['algorithm'].native\n mgf1_hash_algo = algo_utils.get_hash_algo_by_name(mgf1_hash_algo)\n return Verifier(lambda:\n pub_key.verify(\n signature,\n message,\n padding.PSS(\n mgf = padding.MGF1(mgf1_hash_algo),\n salt_length = sig_algo_params['salt_length'].native\n ),\n hash_algo\n ))\n else:\n return Verifier(lambda:\n pub_key.verify(signature, message, padding.PKCS1v15(), hash_algo)\n )\n\n def get_ecdsa_verifier(pub_key: ecc.EllipticCurvePublicKey):\n return Verifier(lambda:\n pub_key.verify(signature, message, ecc.ECDSA(hash_algo))\n )\n\n def 
get_eddsa_verifier(pub_key: ed25519.Ed25519PublicKey):\n return Verifier(lambda:\n pub_key.verify(signature, message)\n )\n\n def get_dsa_verifier(pub_key: ecc.EllipticCurvePublicKey):\n return Verifier(lambda:\n pub_key.verify(signature, message, hash_algo)\n )\n\n # Get signature verifier\n if self.isRsaKey():\n verifier = get_rsa_verifier(self._pub_key)\n elif self.isEcKey():\n verifier = get_ecdsa_verifier(self._pub_key)\n elif self.isEdKey():\n verifier = get_eddsa_verifier(self._pub_key)\n else:\n verifier = get_dsa_verifier(self._pub_key)\n\n # Verify sig\n try:\n verifier.verify()\n except cryptography_exceptions.InvalidSignature:\n return False\n return True", "def validate_id_nonce_signature(\n cls, *, signature_inputs: TSignatureInputs, signature: bytes, public_key: bytes,\n ) -> None:\n ...", "def check_signature(signature_for_hash_type_f, public_key_blob, sig_blob, expected_hash_type=None):\n signature_type = ord(sig_blob[-1:])\n sig_pair = der.sigdecode_der(sig_blob[:-1])\n if expected_hash_type not in (None, signature_type):\n raise ScriptError(\"wrong hash type\")\n try:\n public_pair = sec_to_public_pair(public_key_blob)\n signature_hash = signature_for_hash_type_f(signature_type)\n v = ecdsa.verify(ecdsa.generator_secp256k1, public_pair, signature_hash, sig_pair)\n except EncodingError:\n v = 0\n return make_bool(v)", "def validate_sig(v):\n return _validate(v, prefixes=[b'edsig', b'spsig', b'p2sig', b'sig'])", "def verify_filename(filename):\n extension = \".ics\"\n if not filename.endswith(extension):\n filename = filename + extension\n return filename", "def test_file_integrity_return_error_in_case_of_bad_md5():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n result = PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert isinstance(result, ApiResponse)", "def verify_signature(message: bytes, sender_public_key: RsaKey) -> bytes:\n signature = message[:sender_public_key.size_in_bytes()] # Assume encryption has been done with same key size\n original_message = message[sender_public_key.size_in_bytes():]\n h = SHA256.new(original_message)\n verifier = pkcs1_15.new(sender_public_key)\n try:\n verifier.verify(h, signature)\n return original_message\n except ValueError:\n raise SignatureNotAuthentic", "def test_azure_sign(self):\n\n data = \"data\".encode(\"utf-8\")\n\n signer = Signer.from_priv_key_uri(self.azure_id, self.azure_pubkey)\n sig = signer.sign(data)\n\n print(sig.signature)\n\n self.azure_pubkey.verify_signature(sig, data)\n with self.assertRaises(UnverifiedSignatureError):\n self.azure_pubkey.verify_signature(sig, b\"NOT DATA\")", "def validate_signature(message):\n user_validation = UserValidation.validate_user(message['user_id'])\n if user_validation:\n return DataShare.validate_signature_from_message(message, public_key=user_validation), user_validation\n return False, None", "def checkvalid(s: bytes, m: bytes, pk: bytes) -> None:\n if len(s) != b // 4:\n raise ValueError(\"signature length is wrong\")\n\n if len(pk) != b // 8:\n raise ValueError(\"public-key length is wrong\")\n\n R = decodepoint(s[: b // 8])\n A = decodepoint(pk)\n S = decodeint(s[b // 8 : b // 4])\n h = Hint(encodepoint(R) + pk + m)\n\n (x1, y1, z1, _) = P = scalarmult_B(S)\n (x2, y2, z2, _) = Q = edwards_add(R, scalarmult(A, h))\n\n if (\n not isoncurve(P)\n or not 
isoncurve(Q)\n or (x1 * z2 - x2 * z1) % q != 0\n or (y1 * z2 - y2 * z1) % q != 0\n ):\n raise SignatureMismatch(\"signature does not pass verification\")", "def _validateFilename(self, filePath):\n # assert True\n raise NotImplementedError", "def verify_signature(request):\n\n secret = settings.GITHUB_WEBHOOK_SECRET\n header = request.headers.get(\"X-Hub-Signature\")\n\n if header is None:\n abort(403)\n\n if header[:5] != \"sha1=\":\n abort(403)\n\n signature = header[5:]\n\n mac = hmac.new(secret, msg=request.data, digestmod=\"sha1\")\n if not hmac.compare_digest(str(mac.hexdigest()), str(signature)):\n abort(403)" ]
[ "0.66871303", "0.6619433", "0.6604733", "0.65458876", "0.6505863", "0.6505863", "0.63899714", "0.63860995", "0.63552636", "0.6281927", "0.61876994", "0.61288184", "0.6126398", "0.6071543", "0.60413754", "0.6026036", "0.60181516", "0.5999449", "0.59889215", "0.5982611", "0.59699494", "0.59397066", "0.5930195", "0.5906953", "0.5901288", "0.5883293", "0.58661187", "0.58624965", "0.5845882", "0.5835772", "0.5825143", "0.58211523", "0.58164", "0.5798908", "0.5774056", "0.57671165", "0.57638735", "0.5756924", "0.57526875", "0.57403654", "0.57299334", "0.57266474", "0.57252437", "0.5724725", "0.5702455", "0.5700296", "0.5696725", "0.5695594", "0.5686353", "0.5674008", "0.5667539", "0.56648153", "0.56637293", "0.5663007", "0.5627927", "0.5621245", "0.55955064", "0.5586135", "0.5584875", "0.55834323", "0.5559763", "0.5551179", "0.55510855", "0.5547334", "0.55429214", "0.553655", "0.5534655", "0.55293286", "0.5528879", "0.5527734", "0.5520967", "0.5518505", "0.55147564", "0.5509586", "0.55094045", "0.550883", "0.54967165", "0.5493216", "0.5492271", "0.5487028", "0.5486918", "0.5485097", "0.54845506", "0.54717815", "0.5462227", "0.5461289", "0.5456703", "0.54528975", "0.54448146", "0.54445434", "0.5438728", "0.5435505", "0.5435252", "0.54351777", "0.5433861", "0.54317325", "0.54303205", "0.54274935", "0.54182154", "0.54104483" ]
0.67912775
0
return path for file
def dataPath(self): return fl.File( self._path + '/renderLayerData.data' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def file_path(self):\n return self.lib.file_path", "def file_path(self):\n return posixpath.dirname(self.file_name)", "def get_path(self):\n try:\n return self._file.path\n except AttributeError:\n return os.path.abspath(self._file.name)", "def path(self):\n return self.file_path()", "def get_file_path(self):\n return self._file_path", "def get_file_path(filename, path='Data/'):\n path= os.path.abspath(os.path.dirname(path))\n return os.path.join(path, filename)", "def file_path(self) -> global___Expression:", "def get_file_path(self,filename):\n return Path(self.resource_path,filename)", "def get_file(self):\n return self.dir + self.file_name + self.extension", "def filepath(self):\n return self.file.path", "def get_file_path(self):\n if self.file_path is None:\n return None\n if self.file_path.endswith('.pyc'):\n return self.file_path[:-1]\n return self.file_path", "def _file_path(self, file: str) -> str:\n return os.path.abspath(f\"tests/resources/{file}\")", "def file_path(self) -> Path:\n return self._input_file", "def fpath(self):\n return os.path.join(self.path, self.name)", "def get_file_path(filename: str):\n return TEMP_DIR.joinpath(filename)", "def file_path(file_name, path):\n return path.rstrip('\\/') + \"/{0}\".format(file_name) if path else os.getcwd() + \"/{0}\".format(file_name)", "def path(filename: str) -> str:\n path = os.path.dirname(sys.argv[0])\n if not path:\n path = '.'\n return path + '/' + filename", "def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)", "def file_path(self):\n return self._obs_file()", "def get_file_path(self):\n if self.path[0] in self._simulation_data.mfpath.model_relative_path:\n return os.path.join(\n self._simulation_data.mfpath.get_model_path(self.path[0]),\n self._filename,\n )\n else:\n return os.path.join(\n self._simulation_data.mfpath.get_sim_path(), self._filename\n )", "def getPath(filename):\n\n if os.path.isabs(filename):\n pathfile = filename\n else:\n filename = filename.lstrip('/\\.')\n filename = filename.replace('/', '\\\\')\n pathfile = os.path.join(os.getcwd(), filename)\n \n return pathfile", "def path(self, args):\n dir_path = self.dir_path_(*args)\n return os.path.join(dir_path, self.file_name)", "def filenameAsPath(self, app):\n return app.recordingsPath.child(self.filename).path", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def file_path() -> str:\n stack_t = inspect.stack()\n ins = inspect.getframeinfo(stack_t[1][0])\n return os.path.abspath(ins.filename)", "def path(self):\n\n if self.file_func:\n path = self.file_func(self.lookup_obj, **self.pattern_params)\n return FilePath(path=path)\n return FilePath(path=\"\")", "def file_path(self):\n if not self._has_tmp_file_path():\n return None\n return self._get_tmp_file_path()", "def _get_resource_path(filename, path=Path.TEST):\n return os.path.normpath(os.path.join(path.value, filename))", "def getFilePath(self, filename):\n idx = self._soundfiles.index(filename)\n return \"{}/{}\".format(self._soundpaths[idx], filename)", "def path(self) -> str:\n return self.src + \"/\"", "def get_file_from_path(file_path):\n return Utils.get_real_file_path(file_path)", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def FilePath(self) -> str:", "def 
_get_file_path(filename=\"\"):\n\n return os.path.join(data_path, \"cifar-10-batches-py/\", filename)", "def path(self):\n return os.path.dirname(os.path.abspath(self._filename))", "def filepath(self):\n return self._filepath.path", "def get_relative_pathname(self):\n return os.path.join(Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2],\n str(self.unique_id) + self.file_ext)", "def get_filename(cls):\n return osp.join(cls.dir_location, *cls.file_path)", "def get_path(filename):\n\tif filename != \"\":\n\t\treturn filename\n\telse:\n\t\tfilename = \".\"", "def input_path(self, filename):\n\n return self.filename_path_join(self.input_dir, filename)", "def filepath(filename, data, root='/home/cyneo/Work/Scans/Processed Data/',\r\n filetype='.csv'):\r\n path = os.path.abspath(root + data + '/' + filename +\r\n ' ' + data + filetype)\r\n return path", "def get_filepath(self, name):\r\n return os.path.join(self._folder, name)", "def get_file_full_path(self):\n return self.save_dir + os.sep + self.save_file_without_ext + self.save_file_ext", "def get_file_path(filename):\n here_dir = os.path.dirname(os.path.abspath(__file__))\n file_dir = os.path.join(here_dir, \"../data/\", filename)\n\n return file_dir", "def _getAbsolutePath(self, filename):\n\n # find the correct path, in the experiment file they are either\n # relative to the experiment file, or an absolute path\n if filename != os.path.abspath(filename):\n return os.path.join(self._path, filename)\n else:\n return filename", "def get_path(d, f):\n path = os.path.join(d, f)\n check_file(path)\n return path", "def compute_path(file: mesonlib.FileOrString) -> str:\n if isinstance(file, File):\n return file.absolute_path(self.source_dir, self.build_dir)\n return os.path.normpath(os.path.join(self.build_dir, file))", "def filepath(self):\n return self.filepath_", "def getCurrentFilePath(self):\n return os.path.abspath(self.filePath)", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def file_location(self, file: str) -> str:\n return os.path.join(str(self.id), file)", "def __make_path(self, filename):\n return self.__path() + os.sep + filename", "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "def filePath(self):\n # get filename\n filename = tkFileDialog.askopenfilename()\n return str(filename)", "def format_path(file: str) -> str:\n return os.path.abspath([file.replace('/', os.path.sep)][0])", "def input_path(self, filename):\n\n return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))", "def get_file_path_in_project_directory(filename): \n DIR = os.path.dirname(os.path.abspath(\"__file__\")) \n path = os.path.join(DIR, filename)\n return path", "def filepath(self):\n return self._filepath", "def filepath(self):\n return self._filepath", "def __get_path(self):\n return self.path", "def temporary_file_path(self):\n return self.file.name", "def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)", "def pathToFileName(self, path):\n\t\t# Find the path, and strip the leading slash.\n\t\tpath =urlparse.urlparse(self.path)[2].lstrip(\"/\")\n\t\t# Process url escape codes, and normalize the path.\n\t\tpath = os.path.normpath(urllib2.unquote(path))\n\t\t# normpath strips the last slash\n\t\tif os.path.isdir(path):\n\t\t\treturn path + '/'\n\t\telse:\n\t\t\treturn path", "def path(self):\n return self._data_file", "def get_full_path(file_extension=True) -> str:\n return 
get_directory() + \"/\" + get_filename(file_extension=file_extension)", "def get_path(self) -> Union[str, 'BytesIO']:\n return self._filepath", "def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )", "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def getFile(self, stamp):\n name = escapeForPath(str(stamp))\n return os.path.join(self.path, name)", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def get_shp_file(self):\n files = os.listdir(self.targetpath)\n file = files[0].split('.')[0]\n return self.targetpath + '/' + file", "def tfile_path(filename):\n here = os.path.dirname(__file__)\n return '{0}/static/files/{1}'.format(here, filename)", "def get_file_path(cls, file_name, folder_name):\n return cls.file_path.parent / folder_name / file_name", "def saved_file_path_string(self):\n return self.saved_file_path.as_posix()", "def path(self):\n return self.get_upload_set().path(self.filename)", "def get_path(self):\n return self.path", "def get_test_file_path(filename):\n file_path = Path(__file__).parent / \"files\" / filename\n\n if not file_path.resolve().exists():\n raise FileNotFoundError(\n f\"File {filename} not found in {file_path.parent}\"\n )\n\n return str(file_path)", "def resource_path(self, resource):\n # type: (Text) -> Text\n # This deliberately raises FileNotFoundError instead of\n # NotImplementedError so that if this method is accidentally called,\n # it'll still do the right thing.\n raise FileNotFoundError", "def GetFileName(self):\n return self.file.GetPath()", "def _pf(file_path):\n return os.path.join(self.wf_path, file_path)", "def get_path(self, path):\n if path.startswith('/') and not path.startswith('~/'):\n return os.getcwd() + '/' + path\n else:\n return path", "def selected_filepath(self):\n return self.__make_path(self.selected_filename)", "def file_path(instance, filename):\n hashcode = hash(filename)\n mask = 255 # bitmask\n # use the first and second bytes of the hash code represented as\n # zero-padded hex numbers as directory names\n # provides 256 * 256 = 65536 of possible directory combinations\n dir1 = \"{:0>2x}\".format(hashcode & mask)\n dir2 = \"{:0>2x}\".format((hashcode >> 8) & mask)\n # Galaxy doesn't process names with parentheses in them\n filename = re.sub('[()]', '_', filename)\n return os.path.join(dir1, dir2, filename)", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def full_path(filename):\n\timport os.path\n\tfolder = os.path.dirname(os.path.realpath(__file__))\n\treturn os.path.join(folder, filename)", "def pathInDir(self, fileName):\n path = os.path.abspath(os.path.join(self.myDir, fileName))\n self.checkPath(path)\n return path", "def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path", "def filepath(p):\n if os.path.isfile(p):\n return os.path.realpath(p)\n else:\n raise ArgumentTypeError('{} is not a file.'.format(p))", "def get_path_filename(handle):\n path = config['path'].strip('/').strip()\n return path + '/' + handle + config['extension']", "def path(self):\n if self.filename:\n return os.path.join(self.season.path, self.filename)", "def step_file_path(self, string):\n if not self.has_step_field(string):\n 
return None\n # TODO handle url\n root_dir = self.root_dir()\n if root_dir:\n path = os.path.join(root_dir, self.step_field(string))\n return os.path.realpath(path)\n return os.path.realpath(self.step_field(string))", "def make_path(self, filename):\n return os.path.join(self.root_path, filename)", "def _get_file_path(self):\n self.select_pdf()\n self.file_path_label.configure(\n text=self._shorten_file_name())\n self.file_path_label.grid(row=0, column=1)", "def get_file_path(instance, filename):\n\text = filename.split('.')[-1]\n\tfilename = '%s.%s' % (uuid.uuid4(), ext)\n\treturn os.path.join('upload', filename)", "def get_filename(filepath):\n return filepath.replace(\"{}\\\\\".format(RES_DIR), \"\")", "def get_log_file_path(self):\n dir_path = self._get_log_file_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.LOG_FILE_NAME)" ]
[ "0.8236138", "0.8024989", "0.80158114", "0.79347897", "0.7926399", "0.79031473", "0.786907", "0.78308654", "0.7762692", "0.77185005", "0.76789683", "0.7646001", "0.7616632", "0.76083356", "0.75883675", "0.7509448", "0.74975693", "0.7472752", "0.7454609", "0.7448224", "0.74452096", "0.7444855", "0.74357957", "0.73919487", "0.73903745", "0.73864275", "0.73859715", "0.73700225", "0.7353565", "0.73225576", "0.73222965", "0.7309661", "0.7308208", "0.72952116", "0.7290195", "0.72885394", "0.7259006", "0.72578585", "0.72559017", "0.72301084", "0.72286284", "0.7213492", "0.72004986", "0.7188518", "0.7181277", "0.7172813", "0.7171579", "0.71461403", "0.71460336", "0.7109684", "0.7108191", "0.71071446", "0.71071446", "0.7103689", "0.7072966", "0.70583844", "0.7032687", "0.7027298", "0.70182824", "0.7017095", "0.7016875", "0.7016875", "0.7001032", "0.6996157", "0.6991198", "0.69870394", "0.69790024", "0.6938795", "0.69322276", "0.6924795", "0.69247925", "0.6916548", "0.6914202", "0.6913466", "0.69112253", "0.69092935", "0.69086206", "0.6907503", "0.6906896", "0.6899503", "0.68893373", "0.6887196", "0.68725085", "0.6870847", "0.68696916", "0.6864943", "0.6855225", "0.68540543", "0.68540543", "0.6853483", "0.6851728", "0.68485856", "0.6844076", "0.684145", "0.6839469", "0.68385714", "0.6830281", "0.682114", "0.6818535", "0.6808786", "0.68035144" ]
0.0
-1
return path for lights file
def lightPath(self): return mfl.mayaFile( self._path + '/lights.ma' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )", "def darkpath(cam):\n return os.path.join(BASEPATH, cam + \"_dark\")", "def path_for(filename):\n if settings.value(Key.Theme) == Themes.Light.value:\n return (IMAGES_PATH / Themes.Light.value / filename).as_posix()\n return (IMAGES_PATH / Themes.Dark.value / filename).as_posix()", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def path(self) -> str:\n return self.src + \"/\"", "def sirsam_target_path(data_sirsam):\n return os.path.join(data_sirsam, 'targets', 'geochem_sites_log.shp')", "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def environmentImagesPath():\n # A recursion counter to make sure that the loop ends.\n count = 0\n # Get the path to the Blender executable.\n filePath = os.path.dirname(bpy.app.binary_path)\n # Find the lowest path level which contains Blender.\n while \"blender\" not in os.path.basename(filePath).lower():\n filePath = os.path.dirname(filePath)\n if not filePath or count == 20:\n break\n count += 1\n\n # Search all subpaths for the datafiles folder. Based on this folder\n # the path can be completed.\n for dirPath, dirs, fileList in os.walk(filePath):\n if os.path.basename(dirPath) == \"datafiles\":\n return os.path.join(os.path.join(dirPath, \"studiolights\"), \"world\")", "def rliPath():\r\n if isWindows():\r\n homeDir = win32api.GetShortPathName(os.path.expanduser('~'))\r\n return os.path.join(homeDir, 'AppData', 'Roaming', 'GRASS7', 'r.li')\r\n else:\r\n return os.path.join(os.path.expanduser(\"~\"), '.grass7', 'r.li')", "def flatpath(cam):\n return os.path.join(BASEPATH, cam + \"_flats\")", "def path(self):\n if self.filename:\n return os.path.join(self.season.path, self.filename)", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def file_path(self):\n return self._obs_file()", "def path(self):\n return self.file_path()", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def file_path(self):\n return self.lib.file_path", "def filenameAsPath(self, app):\n return app.recordingsPath.child(self.filename).path", "def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)", "def FilePath(self) -> str:", "def neighbordb_path():\n\n filepath = runtime.default.data_root\n filename = runtime.neighbordb.filename\n return os.path.join(filepath, filename)", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def fpath(self):\n return os.path.join(self.path, self.name)", "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "def aovsPath(self):\n\t\treturn fl.File( self._path + '/aovs.data' )", "def get_shp_file(self):\n files = os.listdir(self.targetpath)\n file = files[0].split('.')[0]\n return self.targetpath + '/' + file", "def darkfiles(cam):\n return fullpathlist(darkpath(cam))", "def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n log_file = os.path.join(pwd, 'log.txt')\n\n return log_file", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def filepath(self):\n return self.file.path", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def 
full_path(name: str) -> Path:\n return PROGRAM_PATH / 'data' / 'saves' / name", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def outputPath():\n scenePath = bpy.data.filepath\n # If the scene hasn't been saved yet the path is empty.\n # Returning an empty path prompts the user for saving the scene.\n if not scenePath:\n return\n renderPath = os.path.join(os.path.dirname(scenePath), \"{}_thumbs\".format(NAME))\n return renderPath", "def log_path(self):\n return LOGS_RESOURCES_PATH / (self.daemon_id + '.log')", "def shaderPath(self):\n\t\treturn mfl.mayaFile( self._path + '/shaders.ma' )", "def path_to_calib_dir_src_custom(self):\n return cp.calib_dir_src.value()", "def dump_file_path(self) -> str:\n return pulumi.get(self, \"dump_file_path\")", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)", "def file_path(self) -> global___Expression:", "def getPath(project):\n if project == '.sourglass':\n path = project\n else:\n path = os.path.join(basepath, 'logs', project + '.csv')\n try:\n open(path)\n except IOError:\n f = open(path, 'w')\n f.close()\n print(\"Started new project.\")\n return path\n else:\n return path", "def BuildFilePath(self, ThisRun, FileName):\n\t\tif (ThisRun):\n\t\t\tFileNamePath = 'extra/MonitorGraph/'+FileName\n\t\telse:\n\t\t\tFileNamePath = 'extra/MonitorGraph/defaults/def_'+FileName\n\t\treturn FileNamePath", "def get_pathname(self):\n return self.image_data.path", "def path_to_calib_dir_custom(self):\n return cp.calib_dir.value()", "def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )", "def _output_log_path(name):\n output = Path(\"../Raw Data/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(\"000_logging.hdf5\")", "def pathtofolder():\n return os.getcwd()", "def path(self):\n return self.lib.path", "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "def get_scanrecpath(self):\n start_key = min(self.obsinfos)\n scanrecname = self.obsinfos[start_key].obsfoldername(\n source_name=self.scanrecparms['pointing'])\n scanrecpath = os.path.join(self.scanpath, scanrecname)\n return scanrecpath", "def get_output_path():\n\n path = rs.DocumentPath()\n name = rs.DocumentName()\n \n if gc.operating_system == \"mac\":\n\n path = path[:-len(name)] + \"_system.dat\"\n\n elif gc.operating_system == \"win\":\n\n i = path.rfind(\"\\\\\")\n\n path = path[:i] + \"/_system.dat\" \n\n return path", "def get_filepath(filename):\n return os.path.join(\"datasets\", filename)", "def filepath(self):\n return self.filepath_", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def logpath(self):\n return self.outpath", "def _get_log_filepath(self, imgname):\n\t\treturn os.path.join(self.workdir, imgname + \".log.txt\")", "def filepath(self):\n return self._filepath.path", "def feature_path(self) -> str:\n return join(self.directory_path, 'features')", "def path(self):\n return self._data_file", "def path(self, args):\n dir_path = self.dir_path_(*args)\n return os.path.join(dir_path, 
self.file_name)", "def device_path(self):\n return self._engine.device_path()", "def path(self):\n return os.path.join(self._project.path, self.h5_path[1:]).replace(\"\\\\\", \"/\")", "def get_background_pictures_path(self):\n file_path = os.path.dirname(__file__)\n file_path = os.path.join(file_path, \"Background\")\n return file_path", "def path(self):\n return os.path.join(FLOWJS_PATH, self.filename)", "def iwpath(self):\n return self.server + self.articlepath", "def get_bitstream_path():\n\n env = get_env()\n\n # Derive destination path\n cache_dir = os.getenv(\"VTA_CACHE_PATH\", os.path.join(os.getenv(\"HOME\"), \".vta_cache/\"))\n cache_dir = os.path.join(cache_dir, env.TARGET)\n cache_dir = os.path.join(cache_dir, env.HW_VER.replace(\".\", \"_\"))\n # Create the directory if it didn't exist\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n bit_path = os.path.join(cache_dir, env.BITSTREAM) + \".bit\"\n\n return bit_path", "def get_mask_path(self, file_path: str) -> str:", "def get_log_path():\n return LOG_PATH", "def get_id_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n id_path = os.path.join(root, \"client\\\\files\\\\id.txt\")\n\n return id_path", "def get_golem_path():\r\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"../\"))", "def dataPath(self):\n\t\treturn fl.File( self._path + '/renderLayerData.data' )", "def _getFullPath(self):\n\n if not self.plateifu:\n return None\n\n plate, ifu = self.plateifu.split('-')\n dir3d = self._get_image_dir()\n\n name = 'mangaimage'\n\n return super(Image, self)._getFullPath(name, ifu=ifu, dir3d=dir3d,\n drpver=self._drpver, plate=plate)", "def get_filepath(self, name):\r\n return os.path.join(self._folder, name)", "def flag_file(self):\n return os.path.join(self.flag_dir, self.flag_name)", "def _get_file_path(filename=\"\"):\n\n return os.path.join(data_path, \"cifar-10-batches-py/\", filename)", "def path(self):\n ...", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path", "def get_des_lensdir():\n d=get_lensdir()\n return os.path.join(d, 'des-lensing')", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def _path(name: str):\n return os.path.join(ASSET_PATH, name)", "def get_file_path(self,filename):\n return Path(self.resource_path,filename)", "def get_image_path_instrument1(instance, filename):\n return os.path.join('uploads', 'instrument', str(instance.siteId.pk))", "def nomenclatures_filepath() -> str:\n\n return resource_filename(__name__, \"nomenclatures.json\")", "def info_file_path_abs(self) -> Path:\n return Path(self._repo.working_tree_dir, \"INFO.yaml\")", "def get_file_save_path(self):\n return self.out", "def get_plato_path():\n\treturn \"/tsi/\"", "def get_cfg_path(filename):\n return os.path.join(get_cfg_dir(), filename)", "def abspath(self, fileid):\n # Find the directory, relative from the corpus root.\n name = fileid.split('.')[0]\n category = fileid.split('_')[0]\n # Create the pickle file extension\n basename = name + '.pickle'\n\n # Return the path to the file relative to the target.\n return os.path.normpath(os.path.join(self.target, category, basename))", "def get_path(self):\n try:\n return self._file.path\n except AttributeError:\n return os.path.abspath(self._file.name)", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def s2_band():\n 
return os.path.join(TESTDATA_DIR, \"s2_band.tif\")", "def get_texture_path(filepath, tex_num, scene):\n\n path, fname = filepath.rsplit(os.sep, 1)\n props = scene.revolt\n\n # Checks if the loaded model is located in the custom folder\n folder = path.rsplit(os.sep, 1)[1]\n if folder == \"custom\":\n path = path.rsplit(os.sep, 1)[0]\n folder = path.rsplit(os.sep, 1)[1]\n\n if not os.path.isdir(path):\n return None\n\n # The file is part of a car\n if props.prm_check_parameters and \"parameters.txt\" in os.listdir(path):\n filepath = os.path.join(path, \"parameters.txt\")\n if not filepath in PARAMETERS:\n PARAMETERS[filepath] = read_parameters(filepath)\n tpage = PARAMETERS[filepath][\"tpage\"].split(os.sep)[-1]\n\n return os.path.join(path, tpage)\n\n # The file is part of a track\n elif is_track_folder(path):\n tpage = folder.lower() + chr(97 + tex_num) + \".bmp\"\n return os.path.join(path, tpage)\n else:\n return os.path.join(path, \"dummy{}.bmp\".format(chr(97 + tex_num)))", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'visual_genome')", "def get_file_path(self):\n if self.path[0] in self._simulation_data.mfpath.model_relative_path:\n return os.path.join(\n self._simulation_data.mfpath.get_model_path(self.path[0]),\n self._filename,\n )\n else:\n return os.path.join(\n self._simulation_data.mfpath.get_sim_path(), self._filename\n )", "def get_log_file_path(self):\n dir_path = self._get_log_file_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.LOG_FILE_NAME)", "def get_hookscript_path ( self ):\n return self.hook_script_fspath", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")" ]
[ "0.82484734", "0.6809375", "0.66981214", "0.6469427", "0.6256557", "0.6256362", "0.62507707", "0.6244421", "0.62019056", "0.61183137", "0.61059153", "0.6080485", "0.6063016", "0.60597664", "0.60492027", "0.603537", "0.6034855", "0.602494", "0.5999086", "0.59892964", "0.59726596", "0.5952461", "0.5952201", "0.5946454", "0.5940829", "0.5936752", "0.59068924", "0.5887162", "0.58870095", "0.58761823", "0.58629674", "0.5856616", "0.5856616", "0.5845764", "0.5844262", "0.58371365", "0.5823033", "0.58098066", "0.5809602", "0.5808024", "0.5800725", "0.57901245", "0.57853913", "0.57667154", "0.5764701", "0.5752582", "0.5744665", "0.5744582", "0.57430893", "0.57404304", "0.5740232", "0.5737468", "0.57229257", "0.572126", "0.572126", "0.572126", "0.5718448", "0.57118636", "0.57005936", "0.56818277", "0.5680806", "0.5677973", "0.566753", "0.5665734", "0.56556046", "0.5650774", "0.5644109", "0.56387186", "0.56363416", "0.56266767", "0.5623554", "0.5620808", "0.5612734", "0.56046605", "0.5598765", "0.55968124", "0.5595792", "0.5594743", "0.55930805", "0.5588811", "0.5581785", "0.5579135", "0.5578047", "0.5568249", "0.5565848", "0.5559995", "0.55566937", "0.5554952", "0.5552698", "0.5551447", "0.55483544", "0.55430764", "0.55405635", "0.55351883", "0.5534921", "0.5533569", "0.55329484", "0.55324554", "0.5525972", "0.5524882" ]
0.820123
1
return the path for the shader file
def shaderPath(self): return mfl.mayaFile( self._path + '/shaders.ma' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFragmentShader(self):\n return self.fshader", "def getCompiled(self):\n if self.isCompiled():\n return self.shader\n else:\n raise Exception(\"el shader no ha sido compilado aun\")", "def dataShader(self):\n\t\treturn self._shader", "def location( self, shader, mode ):\n return shader.getLocation( mode, self.name, uniform=True )", "def printShader(self):\n print self.file", "def get_shader_codes(self):\n vs = VS_TEMPLATE\n fs = FS_TEMPLATE\n \n # Shader headers\n vs_header = self.get_header('vertex')\n fs_header = self.get_header('fragment')\n \n # Varyings\n for varying in self.varyings:\n s1, s2 = get_varying_declarations(varying)\n vs_header += s1\n fs_header += s2\n \n # vs_header += \"\".join(self.vs_headers)\n # fs_header += \"\".join(self.fs_headers)\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_HEADER%\", vs_header)\n fs = fs.replace(\"%FRAGMENT_HEADER%\", fs_header)\n \n # Vertex and fragment main code\n vs_main = self.get_main('vertex')\n fs_main = self.get_main('fragment')\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_MAIN%\", vs_main)\n fs = fs.replace(\"%FRAGMENT_MAIN%\", fs_main)\n \n # frag color or frag data\n if self.fragdata is None:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragColor = out_color;\"\"\")\n else:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragData[%d] = out_color;\"\"\" % self.fragdata)\n \n # Make sure there are no Windows carriage returns\n vs = vs.replace(b\"\\r\\n\", b\"\\n\")\n fs = fs.replace(b\"\\r\\n\", b\"\\n\")\n \n # OLDGLSL does not know the texture function\n if not OLDGLSL:\n fs = fs.replace(\"texture1D(\", \"texture(\" % 2)\n fs = fs.replace(\"texture2D(\", \"texture(\" % 2)\n \n # set default color\n fs = fs.replace('%DEFAULT_COLOR%', str(self.default_color))\n \n # replace GLSL version header\n vs = vs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n fs = fs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n \n # replace GLSL precision header\n vs = vs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n fs = fs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n \n return vs, fs", "def __str__(self):\n if self.fshader is None:\n f = \"not defined\"\n else:\n f = self.fshader.getPath()\n if self.vshader is None:\n v = \"not defined\"\n else:\n v = self.vshader.getPath()\n if self.enabled:\n e = \"enabled\"\n else:\n e = \"disabled\"\n if self.isCompiled():\n c = \"compiled | {0}\".format(e)\n else:\n c = \"not compiled | {0}\".format(e)\n return \"shader: {3}\\nfragment shader: {0}\\nvertex shader: {1}\\nstatus: {2}\".format(f, v, c, self.getName())", "def _path(name: str):\n return os.path.join(ASSET_PATH, name)", "def file_path(self) -> global___Expression:", "def compile(self):\n if not self.isCompiled():\n if self.file is not None:\n try:\n if self.tipo == VERTEX:\n self.shader = glCreateShader(GL_VERTEX_SHADER)\n else:\n self.shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(self.shader, self.file)\n glCompileShader(self.shader)\n self.compiled = True\n except:\n raise Exception(\"error al compilar el shader\")\n else:\n raise Exception(\"no se ha cargado un archivo\")\n else:\n print \"Error :: el shader ya ha sido compilado\"", "def path(self) -> str:\n return self.src + \"/\"", "def dataPath(self):\n\t\treturn fl.File( self._path + '/renderLayerData.data' )", "def lightPath(self):\n\t\treturn mfl.mayaFile( self._path + '/lights.ma' )", "def loadShader(shaderpath, shadername, vertexFormatList=None, fragmentFormatlist=None):\n fragment = Shader(shaderpath + shadername + 
\".fsh\", FRAGMENT, True, fragmentFormatlist)\n vertex = Shader(shaderpath + shadername + \".vsh\", VERTEX, True, vertexFormatList)\n return ShaderProgram(vertex, fragment, True)", "def glGetShaderSourceARB( baseOperation, obj ):\n length = int(glGetObjectParameterivARB(obj, GL_OBJECT_SHADER_SOURCE_LENGTH_ARB))\n if length > 0:\n source = ctypes.create_string_buffer(length)\n baseOperation(obj, length, None, source)\n return source.value.strip(_NULL_8_BYTE) # null-termination\n return ''", "def getVertexShader(self):\n return self.vshader", "def get_path(self):\n return StaticAsset.get_static_path(self._name)", "def get_shp_file(self):\n files = os.listdir(self.targetpath)\n file = files[0].split('.')[0]\n return self.targetpath + '/' + file", "def path(self):\n return os.path.join(FLOWJS_PATH, self.filename)", "def get_file_path(self):\n if self.file_path is None:\n return None\n if self.file_path.endswith('.pyc'):\n return self.file_path[:-1]\n return self.file_path", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def instantiate_for_spirv_args(self, testcase):\n shader, self.filename = tempfile.mkstemp(\n dir=testcase.directory, suffix=self.suffix)\n shader_object = os.fdopen(shader, 'w')\n shader_object.write(self.source)\n shader_object.close()\n return self.filename", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._R_script)", "def outputPath():\n scenePath = bpy.data.filepath\n # If the scene hasn't been saved yet the path is empty.\n # Returning an empty path prompts the user for saving the scene.\n if not scenePath:\n return\n renderPath = os.path.join(os.path.dirname(scenePath), \"{}_thumbs\".format(NAME))\n return renderPath", "def convert_shaders(self):\n raise NotImplementedError()", "def processed_texture_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')", "def __relative_path(self, p4file):\n return self.ctx.depot_path(p4file.depot_path).to_gwt()", "def get_asset_path(name):\n return os.path.join(constants.ROOT_DIR, 'assets', name)", "def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)", "def importShaders(self):\n\t\tif self.shaderPath.exists:\n\t\t\tself.shaderPath.imp()", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def file_path(self):\n return self.lib.file_path", "def get_static_path(path, aid, filename):\n return os.path.join(path, aid, os.path.basename(filename))", "def _rel_path(fn):\n return os.path.join('./eng-edu/ml/cc/src', fn)", "def get_tool_source_path(self, uri_like):", "def assets_path(self) -> str:\n return self._assets_path", "def get_regression_path(cls) -> str:\n return os.path.join(cls.get_regression_root_path(), cls.get_relative_regression_path())", "def resourcePath(self,relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n output = base_path + relative_path\n return output", "def source_file_path(self) -> str:\n return self._source_file_path", "def _get_asset_dir(self, database):\n if not database:\n return 'assets'\n\n path = os.path.dirname(database)\n return 
os.path.join(path, 'assets')", "def fpath(self):\n return os.path.join(self.path, self.name)", "def path_for(filename):\n if settings.value(Key.Theme) == Themes.Light.value:\n return (IMAGES_PATH / Themes.Light.value / filename).as_posix()\n return (IMAGES_PATH / Themes.Dark.value / filename).as_posix()", "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def glr_path_static():\n return os.path.join(base_path, \"static\")", "def _get_input_filepath(self, game_id: int) -> str:\n with self._db_connection as connection:\n with connection.cursor() as cursor:\n cursor.execute('SELECT sgf_content FROM games WHERE id=%s', (game_id,))\n if cursor.rowcount == 0:\n raise GameNotFoundError()\n sgf_content, = cursor.fetchone()\n file_descriptor, filepath = tempfile.mkstemp('.sgf')\n with open(file_descriptor, 'wb') as file:\n file.write(sgf_content)\n return filepath", "def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path", "def output_file_path(self):\n return self.__output_file_path", "def path(self):\n return self.file_path()", "def get_file_path(self,filename):\n return Path(self.resource_path,filename)", "def scriptpath(self, code) -> str:\n return ''", "def get_sql_path(file_path: str) -> str:\n dag_dir = configuration.get('core', 'dags_folder')\n return os.path.join(dag_dir, file_path)", "def get_asset_path(test):\n return DEVICE_ASSETS_PATH + os.path.basename(test)", "def get_file_path(name: str) -> str:\n return os.path.join(DIR, 'scores', f'{name}.json')", "def resourcePath(relative):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(os.path.join(os.path.dirname(sys.modules[__name__].__file__), 'assets'))\r\n\r\n return os.path.join(base_path, relative)", "def getProgramFile(self) -> java.io.File:\n ...", "def get_source_path(self):\n return self.source_path", "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def path(self):\n return self._data_file", "def file_path(self) -> Path:\n return self._input_file", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def path(self) -> str:\n return os.path.join(DIR_CACHE_DATASETS, f\"{self.name}.parquet\")", "def input_path(self, filename):\n\n return self.filename_path_join(self.input_dir, filename)", "def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)", "def path(self) -> str:\n return os.path.join(DIR_CACHE_TABLES, f\"{self.name}.parquet\")", "def _get_file_path(filename=\"\"):\n\n return os.path.join(data_path, \"cifar-10-batches-py/\", filename)", "def showshaderlog(self, shader):\r\n N = 1024\r\n log = (ctypes.c_char * N)()\r\n loglen = ctypes.c_int()\r\n opengles.glGetShaderInfoLog(\r\n shader, N, ctypes.byref(loglen), ctypes.byref(log))\r\n print('shader {}, {}'.format(self.shfile, log.value))", "def resource_path(self,relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\\\\Visual_Ressources\\\\\"+self.language+\"\\\\\") \n # \".\"\n # 'Content\\\\Back_End\\\\'\n return os.path.join(base_path, relative_path)", "def uniform(self, name):\n return glGetUniformLocation(self.program(), name.encode('utf_8'))", "def 
get_texture_path(filepath, tex_num, scene):\n\n path, fname = filepath.rsplit(os.sep, 1)\n props = scene.revolt\n\n # Checks if the loaded model is located in the custom folder\n folder = path.rsplit(os.sep, 1)[1]\n if folder == \"custom\":\n path = path.rsplit(os.sep, 1)[0]\n folder = path.rsplit(os.sep, 1)[1]\n\n if not os.path.isdir(path):\n return None\n\n # The file is part of a car\n if props.prm_check_parameters and \"parameters.txt\" in os.listdir(path):\n filepath = os.path.join(path, \"parameters.txt\")\n if not filepath in PARAMETERS:\n PARAMETERS[filepath] = read_parameters(filepath)\n tpage = PARAMETERS[filepath][\"tpage\"].split(os.sep)[-1]\n\n return os.path.join(path, tpage)\n\n # The file is part of a track\n elif is_track_folder(path):\n tpage = folder.lower() + chr(97 + tex_num) + \".bmp\"\n return os.path.join(path, tpage)\n else:\n return os.path.join(path, \"dummy{}.bmp\".format(chr(97 + tex_num)))", "def _path(self):\n path = REQUIRES['static_url']\n\n # add paths as specified\n for prefix, subpath in self.getPrefixDict().items():\n if ( self.filename.startswith(prefix) ):\n path += subpath\n break;\n\n return path", "def _get_resource_path(filename, path=Path.TEST):\n return os.path.normpath(os.path.join(path.value, filename))", "def get_fmu_file_path(self):\n return self.fmu_file", "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "def file_path(self):\n return self._obs_file()", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def path(self):\n if self.filename:\n return os.path.join(self.season.path, self.filename)", "def get_output_path():\n return os.getcwd() + \"/output/\"", "def file_path() -> str:\n stack_t = inspect.stack()\n ins = inspect.getframeinfo(stack_t[1][0])\n return os.path.abspath(ins.filename)", "def get_path(self):\r\n if self.__matrix_type == matrix_types.PAM250:\r\n return matrix_types.PAM250_PATH\r\n\r\n elif self.__matrix_type == matrix_types.BLOSUM62:\r\n return matrix_types.BLOSUM62_PATH", "def getFastPath() -> str:\n return _FAST_PATH", "def file_path(name):\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_dir, 'data', name.lower())", "def sirsam_target_path(data_sirsam):\n return os.path.join(data_sirsam, 'targets', 'geochem_sites_log.shp')", "def script_path(filename):\n import os\n\n filepath = os.path.join(os.path.dirname(__file__))\n return os.path.join(filepath, filename)", "def path(filename: str) -> str:\n path = os.path.dirname(sys.argv[0])\n if not path:\n path = '.'\n return path + '/' + filename", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def return_output_path(self):\n # Return the path of the output file\n return os.path.join(self._output_file_location, self._output_filename)", "def get_file_path(self):\n if self.path[0] in self._simulation_data.mfpath.model_relative_path:\n return os.path.join(\n self._simulation_data.mfpath.get_model_path(self.path[0]),\n self._filename,\n )\n else:\n return os.path.join(\n self._simulation_data.mfpath.get_sim_path(), self._filename\n )", "def addShaderFromSourceFile(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def _localfile(name):\n return os.path.abspath(resource_filename(__name__, name))", "def get_filename(filepath):\n return filepath.replace(\"{}\\\\\".format(RES_DIR), \"\")", "def 
bundle_path(self, app):\n return (\n self.platform_path / self.output_format / safe_formal_name(app.formal_name)\n )", "def get_weight_file(self) -> Path:\n return Path(Path(self._local_path) / self._weight_file)", "def compile_vertex_shader(source):\n vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)\n gl.glShaderSource(vertex_shader, source)\n gl.glCompileShader(vertex_shader)\n # check compilation error\n result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))\n return vertex_shader", "def compile_vertex_shader(source):\n vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)\n gl.glShaderSource(vertex_shader, source)\n gl.glCompileShader(vertex_shader)\n # check compilation error\n result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))\n return vertex_shader", "def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)", "def compute_path(file: mesonlib.FileOrString) -> str:\n if isinstance(file, File):\n return file.absolute_path(self.source_dir, self.build_dir)\n return os.path.normpath(os.path.join(self.build_dir, file))" ]
[ "0.6595272", "0.6361699", "0.634404", "0.63148177", "0.6163704", "0.614001", "0.610097", "0.6075785", "0.6053845", "0.5924668", "0.58730847", "0.5861786", "0.58363956", "0.58298504", "0.58051777", "0.579176", "0.578321", "0.5744046", "0.5737306", "0.5688931", "0.56700575", "0.56690216", "0.5662235", "0.5617415", "0.561232", "0.56094676", "0.56071514", "0.56030697", "0.56013405", "0.55884683", "0.5583729", "0.5583729", "0.5564299", "0.55426097", "0.55397177", "0.55346495", "0.55244035", "0.55212677", "0.5515387", "0.5511374", "0.5506251", "0.5485205", "0.5483801", "0.5483733", "0.54810154", "0.5467056", "0.54455316", "0.54454595", "0.5442217", "0.54352784", "0.54281646", "0.5424086", "0.542302", "0.5419841", "0.5414982", "0.5413565", "0.54069674", "0.54023844", "0.5397295", "0.53896016", "0.53849506", "0.53818965", "0.53790635", "0.53735447", "0.53695935", "0.53550965", "0.5350708", "0.53484786", "0.53458136", "0.5339452", "0.5329822", "0.5319385", "0.53148186", "0.53147095", "0.5313044", "0.53065544", "0.53053856", "0.5295881", "0.52866495", "0.52840114", "0.52827656", "0.5271821", "0.52703214", "0.52695596", "0.5268003", "0.5263448", "0.52553177", "0.52522355", "0.5251812", "0.5251117", "0.5248811", "0.52361596", "0.52311546", "0.52256256", "0.52119577", "0.520952", "0.52068937", "0.52068937", "0.5205418", "0.52044076" ]
0.86577046
0
return the path for the aovs file
def aovsPath(self): return fl.File( self._path + '/aovs.data' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_path(self):\n return self._obs_file()", "def get_oc_path(cfg):\n return os.path.join(\n BASE_DATA_DIR,\n \"castp\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut),\n \"oc\" + str(cfg.probe) + \".csv\")", "def file_path(self) -> global___Expression:", "def file_path(self):\n return self.lib.file_path", "def path(self) -> str:\n return self.src + \"/\"", "def get_path(self):\n try:\n return self._file.path\n except AttributeError:\n return os.path.abspath(self._file.name)", "def path(self):\n return self.file_path()", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def fpath(self):\n return os.path.join(self.path, self.name)", "def filenameAsPath(self, app):\n return app.recordingsPath.child(self.filename).path", "def opath ( dir_name, file_name = None ):\n if file_name:\n return os.path.join(output_path, dir_name, file_name)\n return os.path.join(output_path, dir_name)", "def full_path(self):\n return os.path.abspath(self.path)", "def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)", "def FilePath(self) -> str:", "def outpath(self):\n return None", "def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)", "def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )", "def sas_file(self):\n\n return os.path.normpath(self.path +'\\\\'+ cfg_dict['format_pgm'])", "def apk_path(self):\n return os.path.join(SETTINGS['repo_dir'], '%s.apk' % self.name)", "def get_scanrecpath(self):\n start_key = min(self.obsinfos)\n scanrecname = self.obsinfos[start_key].obsfoldername(\n source_name=self.scanrecparms['pointing'])\n scanrecpath = os.path.join(self.scanpath, scanrecname)\n return scanrecpath", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def output_file_path(self):\n return self.__output_file_path", "def file_path(self) -> Path:\n return self._input_file", "def get_file_save_path(self):\n return self.out", "def filepath(self):\n return self.file.path", "def rospath(fname,checkfs=True):\n\tif checkfs: assert os.path.exists(fname)\n\tif checkfs: fname = os.path.abspath(fname)\n\tfname = fname.rstrip(\"/\")\n\tmark = \"rosetta_source/src\"\n\tassert fname.find(mark) > 0\n\tr = fname[:fname.find(mark)+len(mark)-4]\t\n\treturn r", "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "def _get_organisms_file_path(self, gene_name, gene_id):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"organisms\", \"{}_{}.txt\".format(gene_name, gene_id))", "def path(self):\n if self.filename:\n return os.path.join(self.season.path, self.filename)", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def _get_cora_filepath():\n # type: () -> Tuple[str, str]\n cache_root = download.get_dataset_directory(_root)\n feat_cache_path = os.path.join(cache_root, feat_file_name)\n edge_cache_path = os.path.join(cache_root, edge_file_name)\n return feat_cache_path, edge_cache_path", "def get_ocio_path():\n bl_path = os.getcwd()\n version = 
f'{bpy.app.version[0]}' + '.' + f'{bpy.app.version[1]}'\n cs_folder = os.path.join(bl_path, version, 'datafiles', 'colormanagement')\n\n return os.path.join(cs_folder, 'config.ocio')", "def full_path(self):\n fullpath = os.path.join(self.path, self.name)\n if self.path == \"\":\n fullpath = self.name\n return fullpath", "def get_file_path(self):\n if self.path[0] in self._simulation_data.mfpath.model_relative_path:\n return os.path.join(\n self._simulation_data.mfpath.get_model_path(self.path[0]),\n self._filename,\n )\n else:\n return os.path.join(\n self._simulation_data.mfpath.get_sim_path(), self._filename\n )", "def get_absolute_path(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetAbsolutePath', self.handle)", "def get_full_path(self):\n try:\n full_path = os.path.abspath(self.FILENAME)\n return full_path\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def get_file_path(self):\n return self._file_path", "def get_eve_path():\n return '{}\\\\CCP\\\\EVE'.format(get_appdata())", "def path(self, args):\n dir_path = self.dir_path_(*args)\n return os.path.join(dir_path, self.file_name)", "def path(self):\n return self.lib.path", "def get_file_path(self):\n config = LinnworksConfig.get_solo()\n order_export_file_dir = config.processed_orders_import_path\n exports = sorted(list(Path(order_export_file_dir).iterdir()))\n return exports[-1]", "def filepath(self):\n return self._filepath.path", "def AppPath(self):\n\t\treturn self.acad.Path", "def abspath(self):\n if self.__abspath is None:\n self.__abspath = pbxpath.abspath(self)\n return self.__abspath", "def get_file_path(self,filename):\n return Path(self.resource_path,filename)", "def _getAbsolutePath(self, filename):\n\n # find the correct path, in the experiment file they are either\n # relative to the experiment file, or an absolute path\n if filename != os.path.abspath(filename):\n return os.path.join(self._path, filename)\n else:\n return filename", "def path(self):\n ...", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def ospath(self, vPath):\n if not vPath.startswith('/'):\n raise OSError(vPath)\n parts = vPath.split('/')\n toppath = self._top_paths[parts[1]]\n return os.path.join(toppath, *parts[2:])", "def return_output_path(self):\n # Return the path of the output file\n return os.path.join(self._output_file_location, self._output_filename)", "def _get_assoc_filepath(self, imgname):\n\t\treturn os.path.join(self.workdir, imgname + \".assoc.txt\")", "def _get_gene_file_path(self):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"genes\", \"genes.txt\")", "def get_path_to(self, *args):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))", "def path_apparmor(self) -> Path:\n return self.path_supervisor / APPARMOR_DATA", "def ifaces_file(self):\n return self.system_path(self._ifaces_file)", "def rel_path(self) -> str:\n return os.path.dirname(self._filepath_oracle.filepath())", "def _get_os_path(self, name=None, path=''):\n\t\t\n\t\tif self.notebook_dir:\n\t\t\tout_path =os.path.join( self.notebook_dir, path.lstrip('/'))\n\t\telse:\n\t\t\tout_path = path\n\t\t\n\t\tif name:\n\t\t\tout_path = os.path.join(out_path, 
name.lstrip('/'))\n\t\t\n\t\treturn out_path", "def path(self):\n\n if self.file_func:\n path = self.file_func(self.lookup_obj, **self.pattern_params)\n return FilePath(path=path)\n return FilePath(path=\"\")", "def path(self):\n return self._data_file", "def FilePath(self):\n\t\treturn self.acad.ActiveDocument.Path", "def full_path(name: str) -> Path:\n return PROGRAM_PATH / 'data' / 'saves' / name", "def file_path(self):\n return posixpath.dirname(self.file_name)", "def path_to_calib_dir_src_default(self):\n if cp.instr_dir.value() is None : return None\n if cp.instr_name.value() is None : return None\n if cp.exp_name_src.value() is None : return None\n return cp.instr_dir.value() + '/' + cp.instr_name.value() + '/' + cp.exp_name_src.value() + '/calib'\n #return os.path.join(cp.instr_dir.value(), cp.instr_name.value(), cp.exp_name_src.value(),'calib')", "def dump_file_path(self) -> str:\n return pulumi.get(self, \"dump_file_path\")", "def get_archive_file_path(self,results):\n path = os.path.join(self.archive_path,results.version)\n if not os.path.exists(path):\n os.makedirs(path)\n return os.path.join(path,self.get_archive_filename(results))", "def get_out_file_path(self):\n dir_path = self._get_output_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.FOCUSED_IMAGE_NAME)", "def full_path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"full_path\")", "def summary_filepath(source):\n filename = f'{source}.dat'\n return os.path.join(OBS_DATA_PATH, source, filename)", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)", "def abspath(self, *args):\n return os.path.join(self._spool, *args)", "def ruta_archivo(path):\n return os.path.abspath(path)", "def _file_storage_path(self, sha1, filename):\n # pylint: disable=no-member\n path = (\n '{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}/'\n '{student_id}/{sha1}{ext}'.format(\n\t\tstudent_id = self.xmodule_runtime.anonymous_student_id,\n loc=self.location,\n sha1=sha1,\n ext=os.path.splitext(filename)[1]\n )\n )\n return path", "def _get_as_path(self):\n return self.__as_path", "def get_file_full_path(self):\n return self.save_dir + os.sep + self.save_file_without_ext + self.save_file_ext", "def _file_path(self, file: str) -> str:\n return os.path.abspath(f\"tests/resources/{file}\")", "def structure_file_path(self):\n return os.path.join(\n self.base_path,\n self.structure_dir,\n self.content_path,\n self.structure_filename\n )", "def get_file_path(self):\n if self.file_path is None:\n return None\n if self.file_path.endswith('.pyc'):\n return self.file_path[:-1]\n return self.file_path", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def _GetOwnersFilePath(path):\n if _IsWellFormattedFilePath(path):\n # _SRC is removed because the file system on the machine running the code\n # may not have a(n) src directory.\n path_without_src = path[len(SRC):]\n\n return os.path.abspath(\n os.path.join(*(DIR_ABOVE_TOOLS + path_without_src.split(os.sep))))\n\n raise Error(\n 'The given path {} is not well-formatted. 
Well-formatted paths begin '\n 'with \"src/\" and end with \"OWNERS\"'.format(path))", "def get_absolute_path(self):\n if self.datafile and self.datafile.storage.exists(self.datafile.path):\n return self.datafile.path\n else:\n return None", "def __get_path(self):\n return self.path", "def default_agasc_dir():\n if 'AGASC_DIR' in os.environ:\n out = Path(os.environ['AGASC_DIR'])\n else:\n out = Path(os.environ['SKA'], 'data', 'agasc')\n return out", "def get_obs_path( full_obsid, basedir ):\n \n year = full_obsid[0:4]\n month = full_obsid[4:6]\n day = full_obsid[6:8]\n \n obs_path = \"/\".join( [basedir, year, month, day, full_obsid] ).replace(\"//\",\"/\")\n \n if not os.path.exists( obs_path ):\n raise Exception(\"{}: This path does not exist - please check your full OBSID\".format(obs_path))\n\n return obs_path", "def filepath(self):\n return self.filepath_", "def get_fmu_file_path(self):\n return self.fmu_file", "def ofile_name(self):\n return self.ofile", "def _get_file_path(filename=\"\"):\n\n return os.path.join(data_path, \"cifar-10-batches-py/\", filename)", "def get_ais_path(self, vessel_type: str, simulation_date: datetime.date) -> Path:\n # adjust date to match AIS file naming convention\n file_date = f\"{self.year}{simulation_date.month:02}01\"\n\n for path in self.paths:\n if vessel_type in path.name and file_date in path.name:\n break\n\n return path", "def path_to_calib_dir_src_custom(self):\n return cp.calib_dir_src.value()", "def real_path(self):\n\t\treturn self.args[0]", "def path(filename: str) -> str:\n path = os.path.dirname(sys.argv[0])\n if not path:\n path = '.'\n return path + '/' + filename", "def info_file_path_abs(self) -> Path:\n return Path(self._repo.working_tree_dir, \"INFO.yaml\")", "def compute_path(file: mesonlib.FileOrString) -> str:\n if isinstance(file, File):\n return file.absolute_path(self.source_dir, self.build_dir)\n return os.path.normpath(os.path.join(self.build_dir, file))" ]
[ "0.7047884", "0.6801188", "0.67156875", "0.6595521", "0.6526361", "0.6481086", "0.64795697", "0.6452638", "0.6423643", "0.6420651", "0.64015955", "0.6397468", "0.63639444", "0.63354146", "0.63254094", "0.630958", "0.6307452", "0.62829566", "0.6233461", "0.6226466", "0.622618", "0.62031794", "0.62010825", "0.6191982", "0.61818427", "0.6180742", "0.6177824", "0.6173967", "0.61672175", "0.61605376", "0.61594427", "0.61516184", "0.61428833", "0.6128893", "0.6109812", "0.6102896", "0.609994", "0.6090866", "0.6090281", "0.6086606", "0.6086606", "0.6084226", "0.60782444", "0.6073849", "0.60676754", "0.60671407", "0.60549736", "0.6050585", "0.60474646", "0.6043422", "0.6032864", "0.6032496", "0.6024236", "0.602151", "0.602151", "0.6021126", "0.6013406", "0.6010354", "0.6002541", "0.59862334", "0.5983177", "0.5983061", "0.59829086", "0.5982871", "0.59821314", "0.59799886", "0.59789073", "0.597859", "0.5971893", "0.5968873", "0.5960559", "0.59599656", "0.5957429", "0.5954967", "0.5951023", "0.5946199", "0.59459984", "0.59442157", "0.59419054", "0.59343165", "0.59238935", "0.5916742", "0.5914583", "0.59133226", "0.59082305", "0.59061456", "0.59038854", "0.5892948", "0.5885992", "0.5884467", "0.5881052", "0.5874492", "0.5873037", "0.58725584", "0.5872554", "0.58719754", "0.5869784", "0.5866162", "0.5863816", "0.58608824" ]
0.87796766
0
return the path for the aovs file
def lightLinkPath(self): return fl.File( self._path + '/lights.data' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aovsPath(self):\n\t\treturn fl.File( self._path + '/aovs.data' )", "def file_path(self):\n return self._obs_file()", "def get_oc_path(cfg):\n return os.path.join(\n BASE_DATA_DIR,\n \"castp\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut),\n \"oc\" + str(cfg.probe) + \".csv\")", "def file_path(self) -> global___Expression:", "def file_path(self):\n return self.lib.file_path", "def path(self) -> str:\n return self.src + \"/\"", "def get_path(self):\n try:\n return self._file.path\n except AttributeError:\n return os.path.abspath(self._file.name)", "def path(self):\n return self.file_path()", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def fpath(self):\n return os.path.join(self.path, self.name)", "def filenameAsPath(self, app):\n return app.recordingsPath.child(self.filename).path", "def opath ( dir_name, file_name = None ):\n if file_name:\n return os.path.join(output_path, dir_name, file_name)\n return os.path.join(output_path, dir_name)", "def full_path(self):\n return os.path.abspath(self.path)", "def _filepath(self, filename):\n return os.path.join(self.root, self.version, filename)", "def FilePath(self) -> str:", "def outpath(self):\n return None", "def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)", "def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )", "def apk_path(self):\n return os.path.join(SETTINGS['repo_dir'], '%s.apk' % self.name)", "def sas_file(self):\n\n return os.path.normpath(self.path +'\\\\'+ cfg_dict['format_pgm'])", "def get_scanrecpath(self):\n start_key = min(self.obsinfos)\n scanrecname = self.obsinfos[start_key].obsfoldername(\n source_name=self.scanrecparms['pointing'])\n scanrecpath = os.path.join(self.scanpath, scanrecname)\n return scanrecpath", "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def output_file_path(self):\n return self.__output_file_path", "def file_path(self) -> Path:\n return self._input_file", "def get_file_save_path(self):\n return self.out", "def filepath(self):\n return self.file.path", "def rospath(fname,checkfs=True):\n\tif checkfs: assert os.path.exists(fname)\n\tif checkfs: fname = os.path.abspath(fname)\n\tfname = fname.rstrip(\"/\")\n\tmark = \"rosetta_source/src\"\n\tassert fname.find(mark) > 0\n\tr = fname[:fname.find(mark)+len(mark)-4]\t\n\treturn r", "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "def _get_organisms_file_path(self, gene_name, gene_id):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"organisms\", \"{}_{}.txt\".format(gene_name, gene_id))", "def path(self):\n if self.filename:\n return os.path.join(self.season.path, self.filename)", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def _get_cora_filepath():\n # type: () -> Tuple[str, str]\n cache_root = download.get_dataset_directory(_root)\n feat_cache_path = os.path.join(cache_root, feat_file_name)\n edge_cache_path = os.path.join(cache_root, edge_file_name)\n return feat_cache_path, edge_cache_path", 
"def get_ocio_path():\n bl_path = os.getcwd()\n version = f'{bpy.app.version[0]}' + '.' + f'{bpy.app.version[1]}'\n cs_folder = os.path.join(bl_path, version, 'datafiles', 'colormanagement')\n\n return os.path.join(cs_folder, 'config.ocio')", "def full_path(self):\n fullpath = os.path.join(self.path, self.name)\n if self.path == \"\":\n fullpath = self.name\n return fullpath", "def get_file_path(self):\n if self.path[0] in self._simulation_data.mfpath.model_relative_path:\n return os.path.join(\n self._simulation_data.mfpath.get_model_path(self.path[0]),\n self._filename,\n )\n else:\n return os.path.join(\n self._simulation_data.mfpath.get_sim_path(), self._filename\n )", "def get_full_path(self):\n try:\n full_path = os.path.abspath(self.FILENAME)\n return full_path\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")", "def get_absolute_path(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetAbsolutePath', self.handle)", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def get_file_path(self):\n return self._file_path", "def get_eve_path():\n return '{}\\\\CCP\\\\EVE'.format(get_appdata())", "def path(self, args):\n dir_path = self.dir_path_(*args)\n return os.path.join(dir_path, self.file_name)", "def path(self):\n return self.lib.path", "def get_file_path(self):\n config = LinnworksConfig.get_solo()\n order_export_file_dir = config.processed_orders_import_path\n exports = sorted(list(Path(order_export_file_dir).iterdir()))\n return exports[-1]", "def filepath(self):\n return self._filepath.path", "def AppPath(self):\n\t\treturn self.acad.Path", "def abspath(self):\n if self.__abspath is None:\n self.__abspath = pbxpath.abspath(self)\n return self.__abspath", "def get_file_path(self,filename):\n return Path(self.resource_path,filename)", "def _getAbsolutePath(self, filename):\n\n # find the correct path, in the experiment file they are either\n # relative to the experiment file, or an absolute path\n if filename != os.path.abspath(filename):\n return os.path.join(self._path, filename)\n else:\n return filename", "def path(self):\n ...", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def ospath(self, vPath):\n if not vPath.startswith('/'):\n raise OSError(vPath)\n parts = vPath.split('/')\n toppath = self._top_paths[parts[1]]\n return os.path.join(toppath, *parts[2:])", "def return_output_path(self):\n # Return the path of the output file\n return os.path.join(self._output_file_location, self._output_filename)", "def _get_assoc_filepath(self, imgname):\n\t\treturn os.path.join(self.workdir, imgname + \".assoc.txt\")", "def _get_gene_file_path(self):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"genes\", \"genes.txt\")", "def get_path_to(self, *args):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))", "def rel_path(self) -> str:\n return os.path.dirname(self._filepath_oracle.filepath())", "def path_apparmor(self) -> Path:\n return self.path_supervisor / APPARMOR_DATA", "def path(self):\n\n if self.file_func:\n path = self.file_func(self.lookup_obj, **self.pattern_params)\n return FilePath(path=path)\n return FilePath(path=\"\")", "def ifaces_file(self):\n return self.system_path(self._ifaces_file)", "def _get_os_path(self, name=None, 
path=''):\n\t\t\n\t\tif self.notebook_dir:\n\t\t\tout_path =os.path.join( self.notebook_dir, path.lstrip('/'))\n\t\telse:\n\t\t\tout_path = path\n\t\t\n\t\tif name:\n\t\t\tout_path = os.path.join(out_path, name.lstrip('/'))\n\t\t\n\t\treturn out_path", "def path(self):\n return self._data_file", "def FilePath(self):\n\t\treturn self.acad.ActiveDocument.Path", "def full_path(name: str) -> Path:\n return PROGRAM_PATH / 'data' / 'saves' / name", "def file_path(self):\n return posixpath.dirname(self.file_name)", "def path_to_calib_dir_src_default(self):\n if cp.instr_dir.value() is None : return None\n if cp.instr_name.value() is None : return None\n if cp.exp_name_src.value() is None : return None\n return cp.instr_dir.value() + '/' + cp.instr_name.value() + '/' + cp.exp_name_src.value() + '/calib'\n #return os.path.join(cp.instr_dir.value(), cp.instr_name.value(), cp.exp_name_src.value(),'calib')", "def dump_file_path(self) -> str:\n return pulumi.get(self, \"dump_file_path\")", "def get_archive_file_path(self,results):\n path = os.path.join(self.archive_path,results.version)\n if not os.path.exists(path):\n os.makedirs(path)\n return os.path.join(path,self.get_archive_filename(results))", "def get_out_file_path(self):\n dir_path = self._get_output_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.FOCUSED_IMAGE_NAME)", "def full_path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"full_path\")", "def summary_filepath(source):\n filename = f'{source}.dat'\n return os.path.join(OBS_DATA_PATH, source, filename)", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)", "def abspath(self, *args):\n return os.path.join(self._spool, *args)", "def ruta_archivo(path):\n return os.path.abspath(path)", "def _file_storage_path(self, sha1, filename):\n # pylint: disable=no-member\n path = (\n '{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}/'\n '{student_id}/{sha1}{ext}'.format(\n\t\tstudent_id = self.xmodule_runtime.anonymous_student_id,\n loc=self.location,\n sha1=sha1,\n ext=os.path.splitext(filename)[1]\n )\n )\n return path", "def _get_as_path(self):\n return self.__as_path", "def get_file_full_path(self):\n return self.save_dir + os.sep + self.save_file_without_ext + self.save_file_ext", "def _file_path(self, file: str) -> str:\n return os.path.abspath(f\"tests/resources/{file}\")", "def structure_file_path(self):\n return os.path.join(\n self.base_path,\n self.structure_dir,\n self.content_path,\n self.structure_filename\n )", "def get_file_path(self):\n if self.file_path is None:\n return None\n if self.file_path.endswith('.pyc'):\n return self.file_path[:-1]\n return self.file_path", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def _GetOwnersFilePath(path):\n if _IsWellFormattedFilePath(path):\n # _SRC is removed because the file system on the machine running the code\n # may not have a(n) src directory.\n path_without_src = path[len(SRC):]\n\n return os.path.abspath(\n os.path.join(*(DIR_ABOVE_TOOLS + path_without_src.split(os.sep))))\n\n raise Error(\n 'The given path {} is not well-formatted. 
Well-formatted paths begin '\n 'with \"src/\" and end with \"OWNERS\"'.format(path))", "def get_absolute_path(self):\n if self.datafile and self.datafile.storage.exists(self.datafile.path):\n return self.datafile.path\n else:\n return None", "def __get_path(self):\n return self.path", "def default_agasc_dir():\n if 'AGASC_DIR' in os.environ:\n out = Path(os.environ['AGASC_DIR'])\n else:\n out = Path(os.environ['SKA'], 'data', 'agasc')\n return out", "def get_obs_path( full_obsid, basedir ):\n \n year = full_obsid[0:4]\n month = full_obsid[4:6]\n day = full_obsid[6:8]\n \n obs_path = \"/\".join( [basedir, year, month, day, full_obsid] ).replace(\"//\",\"/\")\n \n if not os.path.exists( obs_path ):\n raise Exception(\"{}: This path does not exist - please check your full OBSID\".format(obs_path))\n\n return obs_path", "def filepath(self):\n return self.filepath_", "def get_fmu_file_path(self):\n return self.fmu_file", "def _get_file_path(filename=\"\"):\n\n return os.path.join(data_path, \"cifar-10-batches-py/\", filename)", "def ofile_name(self):\n return self.ofile", "def path_to_calib_dir_src_custom(self):\n return cp.calib_dir_src.value()", "def get_ais_path(self, vessel_type: str, simulation_date: datetime.date) -> Path:\n # adjust date to match AIS file naming convention\n file_date = f\"{self.year}{simulation_date.month:02}01\"\n\n for path in self.paths:\n if vessel_type in path.name and file_date in path.name:\n break\n\n return path", "def real_path(self):\n\t\treturn self.args[0]", "def path(filename: str) -> str:\n path = os.path.dirname(sys.argv[0])\n if not path:\n path = '.'\n return path + '/' + filename", "def info_file_path_abs(self) -> Path:\n return Path(self._repo.working_tree_dir, \"INFO.yaml\")", "def compute_path(file: mesonlib.FileOrString) -> str:\n if isinstance(file, File):\n return file.absolute_path(self.source_dir, self.build_dir)\n return os.path.normpath(os.path.join(self.build_dir, file))" ]
[ "0.87794113", "0.70491135", "0.6802213", "0.6716474", "0.65970516", "0.65285367", "0.6482856", "0.6481411", "0.64541376", "0.6425376", "0.6422676", "0.64034206", "0.6400361", "0.63644063", "0.6337509", "0.63264626", "0.6311372", "0.6308107", "0.62853396", "0.6235967", "0.6228521", "0.6228021", "0.62045753", "0.6202659", "0.6194101", "0.6183119", "0.6182376", "0.61792034", "0.6175717", "0.61679226", "0.61620045", "0.61600107", "0.61535734", "0.6145372", "0.61293393", "0.6111112", "0.61048937", "0.6101925", "0.6092978", "0.6092171", "0.6088425", "0.6088425", "0.6085638", "0.60792917", "0.60755605", "0.6069057", "0.60686797", "0.6056392", "0.6053604", "0.6049511", "0.6044954", "0.6034925", "0.6033416", "0.6026384", "0.6022739", "0.6022739", "0.60202664", "0.60151315", "0.6011553", "0.6003601", "0.59881264", "0.5984887", "0.598413", "0.59829223", "0.5982674", "0.59823483", "0.5982009", "0.59810734", "0.5980033", "0.59741044", "0.59702855", "0.5962594", "0.5961279", "0.59587616", "0.5956843", "0.59525394", "0.5947974", "0.5947153", "0.5945265", "0.59430295", "0.59358364", "0.59261435", "0.5918648", "0.59160095", "0.59141195", "0.59101313", "0.5905942", "0.5905442", "0.5894238", "0.58876944", "0.58853406", "0.5882594", "0.5875717", "0.58747774", "0.5873381", "0.5873046", "0.58723456", "0.58713716", "0.58688194", "0.5865632", "0.5862372" ]
0.0
-1
return the path for the masterLayer data
def masterPath(self): return fl.File( self._path + '/master.data' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataPath(self):\n return ''", "def data_path(self):\n raise NotImplementedError", "def dataPath(self):\n\t\treturn fl.File( self._path + '/renderLayerData.data' )", "def path(self):\n return self._data_file", "def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep", "def _getDatasetPath(self):\n return self.__dataset_path", "def get_pathname(self):\n return self.image_data.path", "def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)", "def path(self) :\n return self.m_path", "def local_path(self):\n return self._data.get('local_path')", "def root_path(self):\n return os.path.dirname(self.image.path)", "def root_path(self) -> Path:\n return ARCHIVES_ROOT / self.source_name / self.key", "def get_master_url(self, identifier) -> None:\n # TODO(victorhc): Implement the following method to fetch the cluster\n # master_url from Dataproc.\n return '.'.join([\n self.cluster_metadata.project_id,\n self.cluster_metadata.region,\n self.cluster_metadata.cluster_name\n ])", "def path(self):\n return self.repository_obj.path / self.name", "def path(self):\n return self.path", "def get_data_path():\n return os.getcwd() + \"/data/\"", "def get_data_path():\n\treturn _paths[_DATA_DIRECTORY_KEY]", "def get_target_object_path(data_path: str) -> str:\n path_split = data_path.rsplit('.', 1)\n self_targeting = len(path_split) < 2\n if self_targeting:\n return \"\"\n return path_split[0]", "def path( self ) :\n\n return( self.__path )", "def get_root():\n\n return 'data/simulators/mg1'", "def getPath(self):\n return self.__folder", "def path(self):\n return self._container_dir", "def root_rel_path(self):\n return os.path.dirname(self.image.name)", "def path(self):\n return self.storage.path(self.name)", "def path_name(self):", "def get_root_filename(self):\n pass", "def getPath(self):\n return self.path", "def GetPath(self):\r\n\r\n return self.directory", "def kard_folder_path(self):\n if self._base_path is None:\n if is_running_in_docker():\n container_id = os.popen(\n 'cat /proc/self/cgroup | grep docker | '\n 'grep -o -E \"[0-9a-f]{64}\" | head -n 1').read().rstrip()\n cli = docker.DockerClient(version='auto')\n cont = cli.containers.get(container_id)\n mount = next((\n c for c in cont.attrs['Mounts']\n if c['Destination'] == str(get_kard_root_path())))\n self._base_path = Path(mount['Source'])\n else:\n self._base_path = Path(self.kard.path).parent\n return self._base_path", "def get_data_folder(self, mode='absolute'):\n\n path = Path(f'sub-{self.sub_id}', f'ses-{self.ses_id}', self.modality)\n\n if mode == 'absolute':\n if self.basedir is None:\n raise ValueError('No base directory set.')\n path = self.basedir / path\n\n return path", "def path_addons_data(self) -> Path:\n return self.path_supervisor / ADDONS_DATA", "def root(self):\n return self.paths.root", "def get_root_augpath(self) -> str:\n return get_aug_path(self.loc[\"root\"])", "def get_path(self):\n return self.sync_path", "def data_filename(self) -> str: # type: ignore[return-value]\n return os.path.abspath(self.name) # type: ignore", "def _get_default_path(self):\n # return os.path.join(datasets.ROOT_DIR, 'data', 'MSRC21')\n # set local path\n return u'/Users/danilonunes/workspace/datasets/msrc21/'", "def get_project_data_folder(self):\n return os.path.join(settings.MEDIA_ROOT,self.short_name)", "def _LocalDataPath(local_file):\n return 
data.ResourcePath(local_file)", "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "def get_full_path(self):\n return self.path_display", "def get_path(self):\n return self.path", "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "def __get_path(self):\n return self.path", "def get_data_sequence_path(self) -> Path:\n if not self.sequence_data_paths.base_sequence_path:\n raise ValueError(\"No sequence data path set\")\n return self.sequence_data_paths.base_sequence_path", "def full_path(self) -> str:\n return self.workspace.get_full_path(self)", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def localPath(self):\n return self.home", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path", "def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path", "def get_root_path(self) -> Path:\n return self._path", "def metadata_path(self) -> Path:\n return self.download_folder() / f\"{self.manufacturer_ref}-meta.json\"", "def get_full_folder_path(self):\n data_dir_path = os.path.join(settings.MEDIA_ROOT,self.folder)\n return data_dir_path", "def get_zonepath(self, refresh=False):\n return self.get_attr(ZONE_ENTRY['ZROOT'], refresh)", "def path(self):\n\t\tif '/' in self.name:\n\t\t\treturn self.name.split(\"/\")\n\t\telse:\n\t\t\treturn self.name.split(\"\\\\\")", "def dataset_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(work_dir, consts.DATA_DIR, dataset)", "def path(self) -> str:\n return self._path", "def path(self) -> str:\n return self._path", "def path(self) -> str:\n return self._path", "def path(self) -> str:\n return self._path", "def dataset_dir(self):\n return self._dataset_dir", "def get_shp_file(self):\n files = os.listdir(self.targetpath)\n file = files[0].split('.')[0]\n return self.targetpath + '/' + file", "def path(self):\n # type: () -> string_types\n return self._path", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def data_dir(self) -> Path:\n return self._data_dir", "def path(self):\r\n raise NotImplementedError()", "def path_share(self) -> Path:\n return self.path_supervisor / SHARE_DATA", "def _chip_path(self):\n assert self._parent != None # no chip path is valid without a parent\n base_path = self._parent._chip_path()\n\n base_path.append(self.name)\n return base_path", "def build_path(self):\r\n return self.selmgr.select_path()", "def get_processed_data_folder(self):\n return self.config['processed_folder_path']", "def path(self) -> str:\n return os.path.join(DIR_CACHE_DATASETS, f\"{self.name}.parquet\")", "def getPath(self):\n return self._path", "def path(self):\n return self.file_path()", "def imagePath(self):\n if self.use_dic:\n if self.imlist:\n paths = []\n for img in self.allimgs:\n paths.append(join(self.home, 
'data'+str(self.data), self.activity, self.imsize, str(img)+'.jpg'))\n return paths\n else:\n path = join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(self.img)+'.jpg')\n else:\n path = self.img\n return path", "def data_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\")", "def path(self):\n if self.filename:\n return os.path.join(self.season.path, self.filename)", "def path(self):\n return self._dir_entry.path", "def path(self) -> str:\n return self.src + \"/\"", "def get_data_path():\n up_folder = os.path.abspath(os.path.join(ROOT_DIR, '..'))\n img_folder = os.path.join(up_folder, 'data_set', 'XX-ImageLabel', 'train_data_416')\n img_file = os.path.join(DATA_DIR, \"t_img_tags_train.txt\") # 数据类别\n return img_folder, img_file", "def source_path(self, workspace):\n if self.file_name_method.value == FN_FROM_IMAGE:\n path_feature = \"%s_%s\" % (\n C_PATH_NAME,\n self.file_image_name.value,\n )\n assert workspace.measurements.has_feature(\"Image\", path_feature), (\n \"Image %s does not have a path!\" % self.file_image_name.value\n )\n return workspace.measurements.get_current_image_measurement(path_feature)\n\n # ... otherwise, chase the cpimage hierarchy looking for an image with a path\n cur_image = workspace.image_set.get_image(self.image_name.value)\n while cur_image.path_name is None:\n cur_image = cur_image.parent_image\n assert (\n cur_image is not None\n ), \"Could not determine source path for image %s' % (self.image_name.value)\"\n return cur_image.path_name", "def get_all_master_idx_paths(self):\n paths = utilities.get_all_master_index_paths(rootdir=constants.flow_data_dir)\n return paths", "def get_local_path(self) -> Optional[str]:\n return self._local_path", "def path_name(self):\n return self.full_name", "def server_relative_path(self):\n return self.properties.get(\"ServerRelativePath\", SPResPath(None))", "def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )", "def _state_data_path(self) -> Path:\n return Path(self._state_dir, 'state')", "def path(self):\n ...", "def master(self):\n return self.remappers[self._master_name]" ]
[ "0.6769875", "0.6692991", "0.66356105", "0.642204", "0.63508654", "0.6319566", "0.63127893", "0.6197516", "0.6166026", "0.61191916", "0.6109279", "0.6093193", "0.60923404", "0.60806435", "0.60315734", "0.6025355", "0.60208064", "0.60151654", "0.6008396", "0.6005537", "0.59993035", "0.5991783", "0.59818435", "0.59721416", "0.5959343", "0.595346", "0.5951984", "0.59437186", "0.59374803", "0.59304136", "0.5928086", "0.59243345", "0.59235275", "0.5923133", "0.5919271", "0.5917366", "0.5904009", "0.59002805", "0.58949363", "0.5891885", "0.5887893", "0.58790165", "0.587021", "0.5869951", "0.58653784", "0.58400583", "0.58400583", "0.5831042", "0.5822415", "0.5822415", "0.5822415", "0.5822415", "0.5822415", "0.5822415", "0.5822415", "0.5822415", "0.5822415", "0.5822415", "0.5822415", "0.5822415", "0.58211756", "0.58145136", "0.58096063", "0.58070606", "0.5806169", "0.58051264", "0.57937527", "0.5789671", "0.57671237", "0.57671237", "0.57671237", "0.57671237", "0.5764481", "0.5759559", "0.5758079", "0.57512593", "0.57399684", "0.5738297", "0.5736939", "0.5734196", "0.5731959", "0.5731002", "0.5728163", "0.5727497", "0.572194", "0.5715717", "0.5708214", "0.5702842", "0.5702363", "0.5700485", "0.56974274", "0.56930476", "0.56914204", "0.5683828", "0.5674649", "0.5669143", "0.5667349", "0.5663656", "0.56592596", "0.56589144" ]
0.8091495
0
export information of scene to path
def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True): if exdata: self.exportData() if exshaders: self.exportShaders() if exlights: self.exportLights() if exaovs: self.exportAovs() if exmaster: self.exportMasterLayerSettings()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def export(self, savepath):\n logger.debug(f\"Exporting scene to {savepath}\")\n _backend = self.backend\n\n if not self.is_rendered:\n self.render(interactive=False)\n\n path = Path(savepath)\n if path.suffix != \".html\":\n raise ValueError(\"Savepath should point to a .html file\")\n\n # prepare settings\n vsettings.notebookBackend = \"k3d\"\n\n # Create new plotter and save to file\n plt = Plotter()\n plt.add(self.clean_renderables, render=False)\n plt = plt.show(interactive=False)\n plt.camera[-2] = -1\n\n with open(path, \"w\") as fp:\n fp.write(plt.get_snapshot())\n\n print(\n f\"The brainrender scene has been exported for web. The results are saved at {path}\"\n )\n\n # Reset settings\n vsettings.notebookBackend = None\n self.backend = _backend\n\n return str(path)", "def export( self, captionMode, copyFiles, outputDir ):\n scene = slicer.mrmlScene\n nodes = scene.GetNumberOfNodes()\n\n self.__nodes = {}\n\n # 1 for model name, 2 for parent name\n self.__captionMode = captionMode\n # TRUE if we shall copy the files to the outputDir\n self.__copyFiles = copyFiles\n self.__outputDir = outputDir\n\n self.__tree = Tree()\n self.__tree.create_node( \"Scene\", \"scene\" )\n\n for n in xrange( nodes ):\n\n node = scene.GetNthNode( n )\n\n self.parseNode( node )\n\n [header, footer] = self.configureXrenderers()\n output = header\n output += self.createXtree( \"scene\" )\n output += footer\n\n return output", "def dump(self, path):\n torch.save(self,path)", "def open_scene(file_path, save=True):\n\n pass", "def saveCallback(self):\n\n ## TODO // TEST IT\n self._pathsDict[\"sceneFile\"] = self.getSceneFile()\n try:\n openSceneInfo = self.getOpenSceneInfo()\n if not openSceneInfo:\n return\n except TypeError:\n return\n if openSceneInfo[\"jsonFile\"]:\n jsonInfo = self._loadJson(openSceneInfo[\"jsonFile\"])\n if jsonInfo[\"ReferenceFile\"]:\n absRefFile = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"ReferenceFile\"])\n # TODO : ref => Dict\n absBaseSceneVersion = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"Versions\"][int(jsonInfo[\"ReferencedVersion\"]) - 1][\"RelativePath\"])\n # if the refererenced scene file is the saved file (saved or saved as)\n if self._pathsDict[\"sceneFile\"] == absBaseSceneVersion:\n # copy over the forReference file\n try:\n shutil.copyfile(self._pathsDict[\"sceneFile\"], absRefFile)\n print \"Scene Manager Update:\\nReference File Updated\"\n except:\n pass", "def save_and_reload_scene():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.save_and_reload_scene\")\n\n current_file = mc.file(save=True)\n flg.info(\"Current File: {}\".format(current_file))\n mc.file(current_file, ignoreVersion=True, open=True, force=True)", "def _save(self):\n\n out_dict = {}\n out_dict[\"version\"] = pyfx.__version__\n out_dict[\"name\"] = self._name\n out_dict[\"src\"] = self._src\n\n # Write out the background file as an image\n bg_file = os.path.join(self._name,\"master_bg_image.png\")\n pyfx.util.to_file(self._bg_frame,bg_file)\n out_dict[\"bg_frame\"] = bg_file\n\n f = open(os.path.join(self._name,\"pyfx.json\"),\"w\")\n json.dump(out_dict,f)\n f.close()", "def save(self, export_path: str):", "def save_scene(force=True, **kwargs):\n\n pass", "def import_scene(file_path):\n\n pass", "def filemenu_Export(self):\n line_dict = {}\n 
for line in self.lines.values():\n for name, arr in line.to_mat().items():\n line_dict[name] = arr\n fileTypes = [(\"MATLAB file\",\"*.mat\"), (\"NumPy file\",\"*.npz\")]\n options = {}\n options['initialdir'] = os.path.expanduser('~')\n options['filetypes'] = fileTypes\n options['parent'] = self.master\n filename = filedialog.asksaveasfilename(**options)\n if filename:\n _, ext = os.path.splitext(filename)\n if ext == \".mat\":\n sio.savemat(filename, line_dict)\n elif ext == \".npz\":\n np.savez(filename, lines=line_dict)", "def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")", "def save(self, _name):\r\n try:\r\n with open(_name, 'w+') as fout:\r\n fout.write(\".cube file generated from prt_esolv.py\\n\")\r\n fout.write(f\"{_name}\\n\")\r\n\r\n fout.write(\r\n f\"{int(self.n_atoms)} {float(self.origin[0])} {float(self.origin[1])} {float(self.origin[2])}\\n\")\r\n\r\n fout.write(f\"{int(self.n_x)} {float(self.x[0])} {float(self.x[1])} {float(self.x[2])}\\n\")\r\n fout.write(f\"{int(self.n_y)} {float(self.y[0])} {float(self.y[1])} {float(self.y[2])}\\n\")\r\n fout.write(f\"{int(self.n_z)} {float(self.z[0])} {float(self.z[1])} {float(self.z[2])}\\n\")\r\n\r\n for atom, xyz in zip(self.atoms, self.atoms_xyz):\r\n fout.write(f\"{atom} 0 {xyz[0]} {xyz[1]} {xyz[2]}\\n\")\r\n\r\n for ix in range(self.n_x):\r\n for iy in range(self.n_y):\r\n for iz in range(self.n_z):\r\n fout.write(f\"{self.data[ix][iy][iz]}\")\r\n if iz % 6 == 5:\r\n fout.write('\\n')\r\n fout.write(\"\\n\")\r\n except IOError:\r\n print(f\"Can't create {_name} file!!!\")\r\n raise\r\n\r\n return None", "def save(self, file_path):\n get_base().scene_parser.save(self, file_path)", "def getOpenSceneInfo(self):\n logger.debug(\"Func: getOpenSceneInfo\")\n\n self._pathsDict[\"sceneFile\"] = self.getSceneFile()\n if not self._pathsDict[\"sceneFile\"]:\n return None\n\n # get name of the upper directory to find out base name\n sceneDir = os.path.abspath(os.path.join(self._pathsDict[\"sceneFile\"], os.pardir))\n baseSceneName = os.path.basename(sceneDir)\n\n upperSceneDir = os.path.abspath(os.path.join(sceneDir, os.pardir))\n upperSceneDirName = os.path.basename(upperSceneDir)\n\n if upperSceneDirName in self._subProjectsList:\n subProjectDir = upperSceneDir\n subProject = upperSceneDirName\n categoryDir = os.path.abspath(os.path.join(subProjectDir, os.pardir))\n category = os.path.basename(categoryDir)\n\n dbCategoryPath = os.path.normpath(os.path.join(self._pathsDict[\"databaseDir\"], category))\n dbPath = os.path.normpath(os.path.join(dbCategoryPath, subProject))\n\n pbCategoryPath = os.path.normpath(os.path.join(self._pathsDict[\"previewsDir\"], category))\n pbSubPath = os.path.normpath(os.path.join(pbCategoryPath, subProject))\n pbPath = os.path.normpath(os.path.join(pbSubPath, baseSceneName))\n\n else:\n subProject = self._subProjectsList[0]\n categoryDir = upperSceneDir\n category = upperSceneDirName\n dbPath = os.path.normpath(os.path.join(self._pathsDict[\"databaseDir\"], category))\n pbCategoryPath = os.path.normpath(os.path.join(self._pathsDict[\"previewsDir\"], category))\n pbPath = os.path.normpath(os.path.join(pbCategoryPath, baseSceneName))\n\n jsonFile = os.path.join(dbPath, \"{}.json\".format(baseSceneName))\n if os.path.isfile(jsonFile):\n version = (self.niceName(self._pathsDict[\"sceneFile\"])[-4:])\n self._openSceneInfo = {\n \"jsonFile\":jsonFile,\n \"projectPath\":self._pathsDict[\"projectDir\"],\n \"subProject\":subProject,\n \"category\":category,\n 
\"shotName\":baseSceneName,\n \"version\":version,\n \"previewPath\":pbPath\n }\n return self._openSceneInfo\n else:\n return None", "def save_file(self):\n if self.select_path.text() != \"\":\n filepath = self.select_path.text()\n road_network = self.map_selection.currentText()\n if self.map_selection.currentText() == \"User Defined\":\n road_network = self.map_selection_user_defined.text()\n gen_xml = GenerateXML(filepath, road_network)\n gen_xml.main()\n # remember Road Network for future\n set_metadata(road_network_filepath=road_network)\n else:\n message = \"No export path was selected\"\n iface.messageBar().pushMessage(\"Warning\", message, level=Qgis.Warning)\n QgsMessageLog.logMessage(message, level=Qgis.Warning)", "def __render_scene(self, scene):\n\n # Name and location of the exported project.\n project_dir = os.path.join(tempfile.gettempdir(), \"blenderseed\", \"render\")\n project_filepath = os.path.join(project_dir, \"render.appleseed\")\n\n # Create target directories if necessary.\n if not os.path.exists(project_dir):\n try:\n os.makedirs(project_dir)\n except os.error:\n self.report({\"ERROR\"}, \"The directory {0} could not be created. Check directory permissions.\".format(project_dir))\n return\n\n # Generate project on disk.\n self.update_stats(\"\", \"appleseed Rendering: Exporting Scene\")\n writer = projectwriter.Writer()\n writer.write(scene, project_filepath)\n\n # Render project.\n self.__render_project_file(scene, project_filepath, project_dir)", "def _export_button_cb(self):\n filename = asksaveasfile(\n mode='w',\n filetypes=(('YAML files', '*.yaml'), ('All files', '*.*'))\n )\n\n if not filename:\n return\n\n with open(filename.name, 'w') as f:\n f.write('obstacles:\\n')\n for obstacle in self.obstacles:\n f.write(f' - {str(obstacle)}')\n f.write('\\n')", "def dump_pth(filename, obj, **kwargs):\n import torch\n return torch.save(obj, filename)", "def outputPath():\n scenePath = bpy.data.filepath\n # If the scene hasn't been saved yet the path is empty.\n # Returning an empty path prompts the user for saving the scene.\n if not scenePath:\n return\n renderPath = os.path.join(os.path.dirname(scenePath), \"{}_thumbs\".format(NAME))\n return renderPath", "def export_mesh(remote, path):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_ExportMeshFile_CurrentSelection(path)\n remote.runCommand(cmd)", "def export(self, buffer: IO[str], ind: str = '') -> None:\n buffer.write(ind + 'camera\\n')\n buffer.write(ind + '{\\n')\n buffer.write(f'{ind}\\t\"position\" \"[{self.pos}]\"\\n')\n buffer.write(f'{ind}\\t\"look\" \"[{self.target}]\"\\n')\n buffer.write(ind + '}\\n')", "def saveMovie(self):\n\t\tself.getStackView().saveStackMovie()", "def save(filename, points3, tris, metadata):\n logging.info(\"saving mesh: %s\"%filename)\n cells = {'triangle':tris}\n vtk_io.write(filename, points3, cells)\n with open(filename+'.readme','w') as fid:\n fid.write(metadata)", "def scene_name():\n\n pass", "def exportData(self):\n self.fileName = QtGui.QFileDialog.getSaveFileName(self, self.tr(\"Save Data\"), \"\", \n self.tr('Atom Positions (*.pdb)'))\n if not self.fileName.isEmpty():\n self.setCursor(QtCore.Qt.WaitCursor)\n selectedChain = self.main_chain\n PDBstring = selectedChain.toPDB( CAlphaPlaceholders=False)\n F = open(self.fileName, 'w')\n F.write(PDBstring)\n F.close()\n self.dirty = False\n self.setCursor(QtCore.Qt.ArrowCursor)", "def send_scene_informations(self):\n self.send_player_position()\n self.send_player_direction()\n self.send_grafik_objects()", "def 
save(self):\n filename = os.path.expanduser(\"~/\" + self.name)\n print(filename)\n np.savetxt(filename + \"_left.txt\", self.central)\n np.savetxt(filename + \"_right.txt\", self.boundaries)", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def export(self):\n memento = self.create_memento()\n try:\n f = open(\"story.txt\", \"w\")\n try:\n f.write(memento.__str__())\n finally:\n f.close()\n except IOError:\n print 'IOError while exporting story!'", "def save(self, filename='mesh.json', verbose=False):\n\n f = os.path.abspath(filename) # make sure we are working with abs path\n with open(f, 'w') as outfile:\n json.dump(self.serialize(), outfile)\n\n if verbose is True:\n print('Saved {}'.format(f))\n\n return f", "def WriteExport(self, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):\r\n if (self.__currentImportProperName == None): return\r\n \r\n step = os.path.basename(outputDir)\r\n execution = os.path.basename(os.path.dirname(outputDir))\r\n test = os.path.basename(os.path.dirname(os.path.dirname(outputDir)))\r\n path = os.path.join(self.__scenesDir, test, execution, step)\r\n if (not os.path.isdir(path)):\r\n os.makedirs(path)\r\n self.__pathMap.append((path, outputDir))\r\n \r\n basename = self.__currentImportProperName + \".dae\"\r\n \r\n command = \"\"\r\n \r\n for setting in settings:\r\n value = setting.GetValue().strip()\r\n if (value == \"\"):\r\n value = self.FindDefault(FXsi.__EXPORT_OPTIONS, \r\n setting.GetPrettyName())\r\n command = (command + \"myEProp.Parameters(\\\"\" + \r\n setting.GetCommand() + \"\\\").Value = \" + \r\n setting.GetValue() + \"\\n\")\r\n \r\n self.__logFiles.append(os.path.join(path, os.path.basename(logname)))\r\n \r\n self.__script.write(\r\n \"SetValue \\\"preferences.scripting.cmdlogfilename\\\", \\\"\" + \r\n self.__logFiles[-1].replace(\"\\\\\", \"\\\\\\\\\") + \"\\\"\\n\"\r\n \"set myEProp = CreateExportFTKOptions()\\n\"\r\n \"myEProp.Parameters(\\\"Filename\\\").Value = \\\"\" + \r\n os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") + \r\n \"\\\"\\n\" +\r\n \"myEProp.Parameters(\\\"Format\\\").Value = 1\\n\"\r\n \"myEProp.Parameters(\\\"Verbose\\\").Value = True\\n\" +\r\n command +\r\n \"ExportFTK myEProp.Name\\n\"\r\n )\r\n \r\n return [basename,]", "def save_debug_predict_image(self, scene, debug_dir_uri):\n pass", "def create_main_saver_node(self, version):\n fps = 25\n if version:\n project = version.task.project\n fps = project.fps\n\n random_ref_id = uuid.uuid4().hex\n\n output_format_data = [\n {\n 'name': 'jpg',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('jpg'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'jpg'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'JPEGFormat',\n 'JpegFormat.Quality': 85,\n },\n 'connected_to': {\n # 'ref_id': random_ref_id\n 'Input': {\n 'type': 'ColorCurves',\n 'ref_id': random_ref_id,\n 'input_list': {\n 'EditAlpha': 0.0,\n },\n 'connected_to': {\n 'Input': {\n 'type': 'CineonLog',\n 'input_list': {\n 'Mode': 1,\n # 'RedBlackLevel': 0.0,\n # 'RedWhiteLevel': 1023.0,\n 'RedFilmStockGamma': 1.0\n },\n }\n }\n }\n }\n },\n },\n {\n 'name': 'tga',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('tga'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'tga'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 
'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'TGAFormat',\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n },\n },\n {\n 'name': 'exr',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('exr'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'exr'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 1,\n 'OutputFormat': 'OpenEXRFormat',\n 'OpenEXRFormat.Depth': 1, # 16-bit float\n 'OpenEXRFormat.RedEnable': 1,\n 'OpenEXRFormat.GreenEnable': 1,\n 'OpenEXRFormat.BlueEnable': 1,\n 'OpenEXRFormat.AlphaEnable': 1,\n 'OpenEXRFormat.ZEnable': 0,\n 'OpenEXRFormat.CovEnable': 0,\n 'OpenEXRFormat.ObjIDEnable': 0,\n 'OpenEXRFormat.MatIDEnable': 0,\n 'OpenEXRFormat.UEnable': 0,\n 'OpenEXRFormat.VEnable': 0,\n 'OpenEXRFormat.XNormEnable': 0,\n 'OpenEXRFormat.YNormEnable': 0,\n 'OpenEXRFormat.ZNormEnable': 0,\n 'OpenEXRFormat.XVelEnable': 0,\n 'OpenEXRFormat.YVelEnable': 0,\n 'OpenEXRFormat.XRevVelEnable': 0,\n 'OpenEXRFormat.YRevVelEnable': 0,\n 'OpenEXRFormat.XPosEnable': 0,\n 'OpenEXRFormat.YPosEnable': 0,\n 'OpenEXRFormat.ZPosEnable': 0,\n 'OpenEXRFormat.XDispEnable': 0,\n 'OpenEXRFormat.YDispEnable': 0,\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n {\n 'name': 'mp4',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mp4'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mp4'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'H.264_avc1',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n {\n 'name': 'mov',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mov'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mov'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'Apple ProRes 422 HQ_apch',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n\n 'QuickTimeMovies.LimitDataRate': 0.0,\n 'QuickTimeMovies.DataRateK': 1000.0,\n 'QuickTimeMovies.Advanced': 1.0,\n 'QuickTimeMovies.Primaries': 0.0,\n 'QuickTimeMovies.Transfer': 0.0,\n 'QuickTimeMovies.Matrix': 0.0,\n 'QuickTimeMovies.PixelAspectRatio': 0.0,\n 'QuickTimeMovies.ErrorDiffusion': 1.0,\n 'QuickTimeMovies.SaveAlphaChannel': 1.0,\n\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n\n\n\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n ]\n\n if version.task.type and version.task.type.name == 'Plate':\n # create a different type of outputs\n output_format_data = [\n {\n 'name': 'jpg',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 
'TOOLS_Name': self.output_node_name_generator('jpg'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'jpg'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'JPEGFormat',\n 'JpegFormat.Quality': 85,\n },\n },\n },\n {\n 'name': 'exr',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('exr'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'exr'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'OpenEXRFormat',\n 'OpenEXRFormat.Depth': 1, # 16-bit float\n 'OpenEXRFormat.RedEnable': 1,\n 'OpenEXRFormat.GreenEnable': 1,\n 'OpenEXRFormat.BlueEnable': 1,\n 'OpenEXRFormat.AlphaEnable': 0,\n 'OpenEXRFormat.ZEnable': 0,\n 'OpenEXRFormat.CovEnable': 0,\n 'OpenEXRFormat.ObjIDEnable': 0,\n 'OpenEXRFormat.MatIDEnable': 0,\n 'OpenEXRFormat.UEnable': 0,\n 'OpenEXRFormat.VEnable': 0,\n 'OpenEXRFormat.XNormEnable': 0,\n 'OpenEXRFormat.YNormEnable': 0,\n 'OpenEXRFormat.ZNormEnable': 0,\n 'OpenEXRFormat.XVelEnable': 0,\n 'OpenEXRFormat.YVelEnable': 0,\n 'OpenEXRFormat.XRevVelEnable': 0,\n 'OpenEXRFormat.YRevVelEnable': 0,\n 'OpenEXRFormat.XPosEnable': 0,\n 'OpenEXRFormat.YPosEnable': 0,\n 'OpenEXRFormat.ZPosEnable': 0,\n 'OpenEXRFormat.XDispEnable': 0,\n 'OpenEXRFormat.YDispEnable': 0,\n },\n },\n },\n {\n 'name': 'mp4',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mp4'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mp4'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'H.264_avc1',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n },\n },\n },\n ]\n\n # selectively generate output format\n saver_nodes = self.get_main_saver_node()\n\n for data in output_format_data:\n format_name = data['name']\n node_tree = data['node_tree']\n\n # now check if a node with the same name exists\n format_node = None\n format_node_name = self.output_node_name_generator(format_name)\n for node in saver_nodes:\n node_name = node.GetAttrs('TOOLS_Name')\n if node_name.startswith(format_node_name):\n format_node = node\n break\n\n # create the saver node for this format if missing\n if not format_node:\n self.create_node_tree(node_tree)\n else:\n # just update the input_lists\n if 'input_list' in node_tree:\n input_list = node_tree['input_list']\n for key in input_list:\n node_input_list = format_node.GetInputList()\n for input_entry_key in node_input_list.keys():\n input_entry = node_input_list[input_entry_key]\n input_id = input_entry.GetAttrs()['INPS_ID']\n if input_id == key:\n value = input_list[key]\n input_entry[0] = value\n break\n\n try:\n os.makedirs(\n os.path.dirname(\n self.output_path_generator(version, format_name)\n )\n )\n except OSError:\n # path already exists\n pass", "def _save_mayavi_figure(self, fig, filename, azimuth=153, elevation=62,\n distance=400, focalpoint=[25., 63., 60.], aa=16,\n size=(1024, 1024)):\n scene = fig.scene\n\n scene.anti_aliasing_frames = aa\n\n 
mlab.view(azimuth=azimuth, elevation=elevation, distance=distance,\n focalpoint=focalpoint)\n\n scene.save(filename, size=size)\n\n return filename", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def save_pose_to_shelf(*args):\n target_shelf = mel.eval('tabLayout -q -selectTab $gShelfTopLevel;')\n store_cmds = 'import pymel.core as pm \\n' \\\n 'import mimic_utils \\n' \\\n 'reload(mimic_utils) \\n\\n' \\\n # 'if not check_robot_selection(1): \\n' \\\n # ' robot = \\'\\' \\n\\n'\n\n start_line_code = \"[\"\n end_line_code = \"],\\n\"\n\n if not check_robot_selection(1):\n pm.warning(\"Must select exactly one robot\")\n return\n\n robot = get_robot_roots()[0]\n # Check which mode we're in\n current_tab = pm.tabLayout('switcher_tab_layout', query=True, st=True)\n\n # IK MODE\n if current_tab == 'ikTab':\n store_cmds += 'tab = 1 \\n'\n store_cmds += 'attrs = ['\n\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n target_ctrl_str = __TARGET_CTRL_PATH\n\n config_attrs = ['ik', 'v', 'ikSolution1', 'ikSolution2', 'ikSolution3']\n for each in config_attrs:\n find_val = pm.getAttr(target_ctrl_path + \".\" + each)\n save_to_shelf = (start_line_code\n + \"'\"\n + (target_ctrl_str + \".\" + each)\n + \"', \" + \" %f\" + end_line_code) % find_val\n\n store_cmds += save_to_shelf\n\n # If a tool controller exists, use that to keyframe transformation\n # attributes\n if pm.objExists(tool_ctrl_path):\n target_ctrl = tool_ctrl_path\n target_ctrl_str = __TOOL_CTRL_PATH\n else:\n target_ctrl = target_ctrl_path\n target_ctrl_str = __TARGET_CTRL_PATH\n\n keyable = pm.listAttr(target_ctrl,\n k=True,\n r=True,\n w=True,\n c=True,\n u=True)\n\n # Remove robotSubtype from list\n # In future rigs, this shouldn't be keyable\n if 'robotSubtype' in keyable:\n keyable.remove('robotSubtype')\n\n for each in keyable:\n find_val = pm.getAttr(target_ctrl + \".\" + each)\n save_to_shelf = (start_line_code + \"'\" + (\n target_ctrl_str + \".\" + each) + \"', \" + \" {}\".format(find_val) + end_line_code)\n store_cmds += save_to_shelf\n\n # FK MODE\n else:\n\n store_cmds += 'tab = 2 \\n'\n store_cmds += 'attrs = ['\n\n target_ctrl_path = get_target_ctrl_path(robot)\n target_ctrl_str = __TARGET_CTRL_PATH\n\n config_attrs = ['ik', 'v']\n for each in config_attrs:\n find_val = pm.getAttr(target_ctrl_path + \".\" + each)\n save_to_shelf = (start_line_code + \"'\" + (\n target_ctrl_str + \".\" + each) + \"', \" + \" %f\" + end_line_code) % find_val\n store_cmds += save_to_shelf\n\n joint_vals = [__A1_FK_CTRL_PATH,\n __A2_FK_CTRL_PATH,\n __A3_FK_CTRL_PATH,\n __A4_FK_CTRL_PATH,\n __A5_FK_CTRL_PATH,\n __A6_FK_CTRL_PATH]\n joint_val_attr = ['rotateY', 'rotateX', 'rotateX', 'rotateZ', 'rotateX', 'rotateZ']\n\n for i, each in enumerate(joint_vals):\n attrs = format_path(each + \".\" + joint_val_attr[i], robot)\n attr_str = each + \".\" + joint_val_attr[i]\n find_val = pm.getAttr(attrs)\n save_to_shelf = (start_line_code + \"'\" + attr_str + \"', \" + \" %f\" + end_line_code) % find_val\n store_cmds += save_to_shelf\n\n store_cmds += '] \\n\\n' \\\n 'mimic_utils.assign_saved_pose(attrs, tab) \\n'\n\n prompt_dialog = pm.promptDialog(t=\"Robot Pose\", m=\"Pose Name:\", b=\"Save\")\n\n # Condition statement that checks if our button gets clicked.\n # If this condition is met, then run the following commands\n if prompt_dialog == \"Save\":\n # This variable stores the Name we add to our Prompt 
Dialog\n prompt_dialog_name = pm.promptDialog(query=True, text=True)\n # This line creates our Shelf Button that uses MEL as the source type\n # for the commands stored in \"store_cmds\", and adds the Shelf Button\n # under our custom tab named \"Body Poses\"\n pm.shelfButton(l=prompt_dialog_name,\n annotation=prompt_dialog_name,\n imageOverlayLabel=prompt_dialog_name,\n i='commandButton.png',\n command=store_cmds,\n p=target_shelf,\n sourceType=\"python\")", "def render_save(scene, cam, globalIdx, trajDir, camDir, NI=1280, NJ=720):\n #render image/convert to bimg\n expimg = scene.render(cam, NI, NJ);\n bimg = convert_image(expimg); \n exp_fname = trajDir + \"/exp_%(#)06d.png\"%{\"#\":globalIdx};\n save_image(bimg, exp_fname); \n\n #save cam\n cam_name = camDir + \"/cam_%(#)06d.txt\"%{\"#\":globalIdx}\n save_perspective_camera(cam, cam_name)\n remove_from_db([cam, expimg, bimg])", "def save(self):\n with open(self.slide_coords_file, 'w') as f:\n json.dump(self.dump(), f)", "def save(self, filename):\n print(\"Saving...\", end=\"\\r\")\n canvas = self.canvas[self.N:self.S,self.W:self.E]\n cv2.imwrite(\"./Output/\"+filename, canvas)\n print(\"Saved:\",filename)", "def print_geometries(self, name) -> None:\n\n open(f\"{name}.xyz\", \"w\").close() # Empty the file\n\n for i, image in enumerate(self):\n energy = image.energy if image.energy is not None else \"none\"\n\n title_line = (\n f\"autodE path point {i}. E = {energy} \"\n f\"charge = {image.charge} \"\n f\"mult = {image.mult} \"\n )\n\n if image.solvent is not None:\n title_line += f\"solvent = {image.solvent.name} \"\n\n atoms_to_xyz_file(\n image.atoms,\n f\"{name}.xyz\",\n title_line=title_line,\n append=True,\n )\n return None", "def dump(self, filename=\".azimint.json\"):\n print \"Dump!\"\n to_save = { \"poni\": str(self.poni.text()).strip(),\n \"detector\": str(self.detector.currentText()).lower(),\n \"wavelength\":float_(self.wavelength.text()),\n \"splineFile\":str(self.splineFile.text()).strip(),\n \"pixel1\": float_(self.pixel1.text()),\n \"pixel2\":float_(self.pixel2.text()),\n \"dist\":float_(self.dist.text()),\n \"poni1\":float_(self.poni1.text()).strip(),\n \"poni2\":float_(self.poni2.text()).strip(),\n \"rot1\":float_(self.rot1.text()).strip(),\n \"rot2\":float_(self.rot2.text()).strip(),\n \"rot3\":float_(self.rot3.text()).strip(),\n \"do_dummy\": bool(self.do_dummy.isChecked()),\n \"do_mask\": bool(self.do_mask.isChecked()),\n \"do_dark\": bool(self.do_dark.isChecked()),\n \"do_flat\": bool(self.do_flat.isChecked()),\n \"do_polarization\":bool(self.do_polarization.isChecked()),\n \"val_dummy\":float_(self.val_dummy.text()).strip(),\n \"delta_dummy\":float_(self.delta_dummy.text()).strip(),\n \"mask_file\":str(self.mask_file.text()).strip(),\n \"dark_current\":str(self.dark_current.text()).strip(),\n \"flat_field\":str(self.flat_field.text()).strip(),\n \"polarization_factor\":float_(self.polarization_factor.value()),\n \"nbpt_rad\":int_(self.rad_pt.text()),\n \"do_2D\":bool(self.do_2D.isChecked()),\n \"nbpt_azim\":int_(self.nbpt_rad.text()),\n \"chi_discontinuity_at_0\": bool(self.chi_discontinuity_at_0.isChecked()),\n \"do_radial_range\": bool(self.do_radial_range.isChecked()),\n \"do_azimuthal_range\": bool(self.do_azimuthal_range.isChecked()),\n \"radial_range_min\":float_(self.radial_range_min.text()),\n \"radial_range_max\":float_(self.radial_range_max.text()),\n \"azimuth_range_min\":float_(self.azimuth_range_min.text()),\n \"azimuth_range_max\":float_(self.azimuth_range_max.text()),\n }\n if 
self.q_nm.isChecked():\n to_save[\"unit\"] = \"q_nm^-1\"\n elif self.tth_deg.isChecked():\n to_save[\"unit\"] = \"2th_deg\"\n elif self.r_mm.isChecked():\n to_save[\"unit\"] = \"r_mm\"\n with open(filename, \"w\") as myFile:\n json.dump(to_save, myFile, indent=4)\n logger.debug(\"Saved\")", "def save_particles_image(self):\n base_filename = self.config['info']['filename_microscope']\n self.save_image_microscope_camera(base_filename)", "def save(self, path=\"\"):\n path = path + \"model_\" + str(self.name) + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n f = open(path, \"w+\")\n for ident in self.networks:\n f.write(ident + \"_\" + self.networks[ident].descriptor.codify_components() + \"_\" + str(self.networks[ident].taking.size) + \",\" + self.networks[ident].taking.type + \"_\" + str(self.networks[ident].producing.size) + \",\" + self.networks[ident].producing.type + \"_\" +\n str(self.networks[ident].depth) + \"_\" + \",\".join(self.reachable[ident]) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.inputs:\n f.write(ident + \"_\" + str(self.inputs[ident].producing.size) + \"_\" + self.inputs[ident].producing.type + \"_\" + str(self.inputs[ident].depth) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.outputs:\n f.write(ident + \"_\" + str(self.outputs[ident].taking.size) + \"_\" + self.outputs[ident].taking.type + \"_\" + str(self.outputs[ident].depth) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for con in self.connections:\n f.write(self.connections[con].codify() + \"\\n\")\n #f.write(\"\\n\")\n\n f.close()\n\n return path", "def read(scene_name):\n routes, fixtures = read_fixtures(scene_name)\n scene = build_scene_from_fixtures(fixtures, scene_name)\n write_to_json(scene, scene_name)\n if routes:\n write_to_json(build_routes_file(routes, scene_name), scene_name + \"-routes\")", "def saveauto(self):\n self.inp.getedge()\n ss=ss=strftime(\"_%Y-%m-%d_%H:%M:%S\", gmtime())\n fn=os.environ['VMEWORKDIR'] +\"/WORK/phases/\"+self.name+ss+self.inp.edge+\"_\"+self.inp.inpnum+\"_\"+self.inp.ctpnum+\".ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)\n else:\n print \"File \",fn, \" saved.\"", "def get_scene_path(self):\n return self._maybe_fix_windows_path(self._document.GetDocumentPath())", "def saveViews(lib, filename='views', path=os.path.expanduser('~')):\n ext = '.camera'\n os.chdir(path)\n f = open(filename + ext, 'wb')\n pickle.dump(lib, f, pickle.HIGHEST_PROTOCOL)\n f.close()", "def dump(self, path, mode='standalone'):\n if mode == 'standalone':\n with open(path+\"/export_grid_standalone\"+str(self._id)+\".html\", 'w+') as f:\n f.write(self.export_html(build=True))\n elif mode == 'all':\n widget_export = self.export_html(build=False)\n with open(path+\"/export_scripts.html\", \"w+\") as f:\n f.write(widget_export['script_tags'])\n with open(path+\"/export_html_state.html\", \"w+\") as f:\n f.write(widget_export['html_state'])\n with open(path+\"/export_state_\"+str(self._id)+\".json\", \"w+\") as f:\n f.write(json.dumps(widget_export['manager_state']))\n with open(path+\"/export_grid_\"+str(self._id)+\".html\", \"w+\") as f:\n f.write(widget_export['grid_div'])", "def save_path(self):\n raise NotImplementedError", "def save_path(self):\n raise NotImplementedError", "async def async_save_scene_names(work_dir: str):\n out_json = json.dumps(_scene_names, indent=2)\n scene_file = path.join(work_dir, 
SCENE_FILE)\n async with aiofiles.open(scene_file, \"w\") as afp:\n await afp.write(out_json)\n await afp.flush()", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def save(self):\n fname = self.dir_saving+str(self.folder)+'/colours.txt'\n if not os.path.isfile(fname):\n self.file_com = open(fname, 'w')\n else:\n print 'warning this person has an objects file in its dir, I will rewrite it.'\n self.file_com = open(fname, 'w')\n\n self.file_com.write(self.all_objects['upper']+','+self.all_objects['lower'])\n # self.all_objects = {}\n self.first_click = 1\n self.file_com.close()\n self.NextVideo()\n # count = 1\n # for im_name in self.onlyfiles:\n # img = cv2.imread(self.dir2+im_name)\n # cv2.rectangle(img,(0,0),(250,50),(255,255,255),-1)\n # cv2.putText(img,'frame : '+str(count),(10,30), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,0),2)\n # img = self.add_objects(img)\n # cv2.imwrite(self.dir_saving+str(self.folder)+'/obj_images/'+im_name,img)\n # count+=1\n self.clear", "def updateSceneName(*args):\n pi.openSceneFullPath = cmds.file(q=True, sn=True)\n pi.openScene = os.path.basename(pi.openSceneFullPath)\n\n if pi.openScene == \"\":\n pi.openScene = \"UNSAVED SCENE!\"\n cmds.text(widgets[\"sceneText\"], e=True, l=pi.openScene)", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def process_scene_data(self, scene, data, tmp_dir):\n pass", "def save_visualization_to_file(self, file_name, length = 90):\n session = self.capture_last(length)\n visualizer.animate(session , 0, length, name = file_name, min_x = -1, max_x = 1, min_y = -1, max_y = 1, show = False )", "def save(self, path):\n torch.save(\n {\n \"input_dimension\": self.input_dimension,\n \"quantiles\": self.quantiles,\n \"width\": self.width,\n \"depth\": self.depth,\n \"activation\": self.activation,\n \"network_state\": self.state_dict(),\n \"optimizer_state\": self.optimizer.state_dict(),\n },\n path,\n )", "def export(self, path):\r\n # Save plot as png\r\n dt = m.get_instance().dt\r\n self.perception_history = m.get_instance().larvae[0].history\r\n t = np.arange(0,len(self.perception_history)*dt,dt)\r\n plt.plot(t,self.perception_history)\r\n plt.title('Perception History')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Perception (uM)')\r\n\r\n plt.gca().set_aspect('equal', adjustable='box')\r\n plt.savefig(path + '.png')", "def export(self):\n self.output = []\n # This is not the best way of catching errors, but timesketch_utils will be\n # deprecated soon.\n # TODO: Consider using the official Timesketch python API.\n if not self.timesketch_api.session:\n return\n self.timesketch_api.export_artifacts(self.paths, self.sketch_id)\n self.sketch_url = self.timesketch_api.get_sketch_url(self.sketch_id)\n self.console_out.StdOut(\n 'Your Timesketch URL is: {0:s}'.format(self.sketch_url))\n self.output.append(self.sketch_url)", "def reference_scene(file_path, **kwargs):\n\n pass", "def save_information(self, path: utils.URLPath):\n # Text summary of model\n with (path / \"model_summary.txt\").open(\"w\") as summary_file:\n def print_file(*args, **kwargs):\n print(*args, **kwargs, file=summary_file)\n self.model.summary(print_fn=print_file)\n\n # Image plotting structure of model\n keras.utils.plot_model(self.model, to_file=str(path / \"model_plot.png\"))\n\n # plot all training history\n for i, (meta, history) 
in enumerate(self.training_history):\n training_output = path / f\"train_{i}\"\n io_functions.save_json(meta, training_output / \"info.json\")\n plot_training_history(history, training_output / \"training.png\")", "def saveAnim(self):\n animLabel = str(self.nameEditLine.text())\n selectionList = cmds.ls(sl=1)\n if animLabel:\n if selectionList:\n currentItem = self.fileDir.selectedItems()\n if currentItem:\n # export anim curve\n\n currentAnimGIFPath = exportAnimCurve.exportAnimCurve(selectionList, currentItem[-1].toolTip(0),\n animLabel, self.tempGIFDir, self.iconsDir)\n\n self.nameEditLine.clear()\n self.recordBtn.loadGIF2Button(path=currentAnimGIFPath)\n\n # refresh\n # self.loadCurrentFolder()\n logger.info('Successfully Save Anim Curve')\n else:\n QtWidgets.QMessageBox.warning(self, 'Warning', 'No folder selected!\\nPlease select the folder!',\n QtWidgets.QMessageBox.Ok)\n logger.warning('No folder selected!\\nPlease select the folder!')\n else:\n QtWidgets.QMessageBox.warning(self, 'Warning',\n 'No item selected!\\nPlease select the Control Object(s)!',\n QtWidgets.QMessageBox.Ok)\n logger.warning('No item selected!\\nPlease select the Control Object(s)!')\n else:\n QtWidgets.QMessageBox.warning(self, 'Warning',\n 'No pose file name enter!\\nPlease input the pose file name!',\n QtWidgets.QMessageBox.Ok)\n logger.warning('No pose file name enter!\\nPlease input the pose file name!')", "def save(self):\n data = (\n self.Joints,\n self.Links,\n self.joint_syms,\n self.global_syms,\n self.name,\n self.sym_prefix,\n )\n cloudpickle.dump(data, open(self.save_filename, \"wb\"))", "def saveSnapshot(self, filename): \n\t\tpass", "def save(self, path):\n torch.save(self, path)", "def save(self, path):\n torch.save(self, path)", "def dump(self, filename=\".azimint.json\"):\n logger.info(\"Dump!\")\n to_save = { \"poni\": str_(self.poni.text()).strip(),\n \"detector\": str_(self.detector.currentText()).lower(),\n \"wavelength\":float_(self.wavelength.text()),\n \"splineFile\":str_(self.splineFile.text()).strip(),\n \"pixel1\": float_(self.pixel1.text()),\n \"pixel2\":float_(self.pixel2.text()),\n \"dist\":float_(self.dist.text()),\n \"poni1\":float_(self.poni1.text()),\n \"poni2\":float_(self.poni2.text()),\n \"rot1\":float_(self.rot1.text()),\n \"rot2\":float_(self.rot2.text()),\n \"rot3\":float_(self.rot3.text()),\n \"do_dummy\": bool(self.do_dummy.isChecked()),\n \"do_mask\": bool(self.do_mask.isChecked()),\n \"do_dark\": bool(self.do_dark.isChecked()),\n \"do_flat\": bool(self.do_flat.isChecked()),\n \"do_polarization\":bool(self.do_polarization.isChecked()),\n \"val_dummy\":float_(self.val_dummy.text()),\n \"delta_dummy\":float_(self.delta_dummy.text()),\n \"mask_file\":str_(self.mask_file.text()).strip(),\n \"dark_current\":str_(self.dark_current.text()).strip(),\n \"flat_field\":str_(self.flat_field.text()).strip(),\n \"polarization_factor\":float_(self.polarization_factor.value()),\n \"nbpt_rad\":int_(self.nbpt_rad.text()),\n \"do_2D\":bool(self.do_2D.isChecked()),\n \"nbpt_azim\":int_(self.nbpt_azim.text()),\n \"chi_discontinuity_at_0\": bool(self.chi_discontinuity_at_0.isChecked()),\n \"do_solid_angle\": bool(self.do_solid_angle.isChecked()),\n \"do_radial_range\": bool(self.do_radial_range.isChecked()),\n \"do_azimuthal_range\": bool(self.do_azimuthal_range.isChecked()),\n \"do_poisson\": bool(self.do_poisson.isChecked()),\n \"radial_range_min\":float_(self.radial_range_min.text()),\n \"radial_range_max\":float_(self.radial_range_max.text()),\n 
\"azimuth_range_min\":float_(self.azimuth_range_min.text()),\n \"azimuth_range_max\":float_(self.azimuth_range_max.text()),\n }\n for unit, widget in self.units.items():\n if widget is not None and widget.isChecked():\n to_save[\"unit\"] = unit.REPR\n break\n else:\n logger.warning(\"Undefined unit !!!\")\n try:\n with open(filename, \"w\") as myFile:\n json.dump(to_save, myFile, indent=4)\n except IOError as error:\n logger.error(\"Error while saving config: %s\" % error)\n else:\n logger.debug(\"Saved\")\n return to_save", "def dump_data(self,filename,dump_id):\n import pickle\n from Auxiliary import tdc_Filenames\n data = [ d.get_pure_data_copy() for d in self.plotter.data ]\n dump_dict={}\n dump_dict['fft_data'] = data\n dump_dict['fitting_type'] = self.fft_fit.type \n dump_dict['nk_plot'] = self.fft_fit.nk_plot\n # full file name of the file with manipulator dump\n filename=tdc_Filenames.get_full_vis_filename(dump_id, filename+'.pickle')\n pickle.dump( dump_dict, open(filename,'w') )\n print '\\nContent dumped in \"%s\" \\n' % filename", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def save_params():\n file_name = filedialog.asksaveasfilename(\n filetypes=[\n (\"JSON\", \"*.json\")\n ],\n initialdir=os.getcwd())\n if file_name: # save option not cancelled by user\n self.parent_class.classes[\"fractal\"].curve.store_curve_tofile(\n file_name)", "def create_metadata(scene: \"Scenemaker\") -> None:\r\n create_datadir()\r\n\r\n with open(dirpath / cng.GENERATED_DATA_DIR / cng.METADATA_FILE, \"w+\") as f:\r\n f.write(str(scene.num2name))", "def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")", "def export_to_file(self):\r\n return True", "def save(self):\n data = \"\"\n for y in xrange(0, BLOCK_NUM_HEIGHT):\n for x in xrange(0, BLOCK_NUM_WIDTH):\n data += self.blocks[y][x]\n data += '\\n'\n print data\n options = {'defaultextension': '.lvl',\n 'filetypes': [('Levels', '.lvl'), ('All files', '*')],\n 'initialdir': 'levels',\n 'initialfile': '',\n 'title': 'Save level'}\n # filename = tkFileDialog.asksaveasfile(**options)\n filename = asksaveasfilename(**options)\n if filename:\n with open(filename, \"w\") as level:\n level.write(data)", "def export_project_dump(self, key):", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def saveToFile(self):\n filename = str(self.outputFileName.text())\n\n if not len(filename):\n return\n\n if os.path.exists(filename) and not self.overwriteCheck.isChecked():\n self.mainWindow.displayWarning(\"File already exists: not overwriting\")\n return\n\n # lattice object\n lattice = self.rendererWindow.getCurrentInputState()\n\n # gather vis atoms if required\n if self.writeFullLattice:\n visibleAtoms = None\n else:\n visibleAtoms = self.rendererWindow.gatherVisibleAtoms()\n\n # write Lattice\n lattice.writeLattice(filename, visibleAtoms=visibleAtoms)", "def export(self, outpath):\n fout = open(outpath, \"w\")\n\n # Header takes the guesswork 
out of loading by recording how many lines, vector dims\n fout.write(str(self.n_words) + \" \" + str(self.n_dim) + \"\\n\")\n for token in self.id2word:\n vector_components = [\"%.6f\" % number for number in self[token]]\n vector_as_string = \" \".join(vector_components)\n\n out_line = token + \" \" + vector_as_string + \"\\n\"\n fout.write(out_line)\n\n fout.close()", "def saveGraph(self, filename):\n nx.write_yaml(self.G,filename)", "def save(self, path: str, store_obj=False):\n if not os.path.exists(path):\n raise IOError(ctext(f\"'{path}' does not exist!\", error))\n\n mprint(\"Saving results, this may take a bit ...\", log)\n proj_path = os.path.join(path, self.label.replace(\" \",\"_\"))\n index = 1\n while True:\n if not os.path.exists(proj_path):\n break\n\n proj_path = os.path.join(path, self.label.replace(\" \",\"_\") + f\"_{index}\")\n index += 1\n\n os.makedirs(proj_path)\n with cd(proj_path):\n os.makedirs(\"data\")\n os.makedirs(\"plots\")\n\n with cd(\"data\"):\n if self._result is not None:\n frame: df = self._result.drop(columns=['f_obj'])\n frame.index.name = 'f_nr'\n df_list = [(self.settings, '#Settings'),\n (self.statistics, '#Statistics'),\n (frame, '#Result')]\n\n with open('result.csv', 'w') as f:\n for fr, comment in df_list:\n f.write(f\"{comment}\\n\")\n fr.to_csv(f)\n f.write(\"\\n\\n\")\n\n self._combinations.to_csv('combinations.csv')\n self.lc.to_csv(\"LC.txt\",**{'index':False})\n self.pdg.to_csv(\"PS.txt\")\n if self._ff is not None:\n self._ff.res_lc.to_csv(\"LC_residual.txt\",**{'index':False})\n self._ff.res_pdg.to_csv(\"PS_residual.txt\")\n\n if self._notes is not None:\n with open(\"notes.txt\",'w') as f:\n f.writelines(self._notes)\n\n if store_obj:\n pickle.dump(self, open(\"obj.smurfs\", \"wb\"))\n\n with cd(\"plots\"):\n images = [(self.lc, \"LC.pdf\"),\n (self.pdg, \"PS.pdf\"),\n ]\n\n if self._ff is not None:\n images += [(self._ff.res_lc, \"LC_residual.pdf\"),\n (self._ff.res_pdg, \"PS_residual.pdf\"),\n (self._ff, \"PS_result.pdf\")\n ]\n\n for obj, name in images:\n fig, ax = pl.subplots(figsize=(16, 10))\n if isinstance(obj, LightCurve):\n obj.scatter(ax=ax)\n else:\n obj.plot(ax=ax, markersize=2)\n pl.tight_layout()\n fig.savefig(name)\n pl.close()\n\n if self.validation_page is not None:\n pdf = matplotlib.backends.backend_pdf.PdfPages(\"Validation_page.pdf\")\n for fig in self.validation_page: ## will open an empty extra figure :(\n pdf.savefig(fig)\n pdf.close()\n\n for fig in self.validation_page:\n pl.close(fig)\n\n mprint(f\"{self.label} Data saved!\",info)", "def store(self, filename):", "def save_mix(remote, path):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_ExportMixFile(path)\n remote.runCommand(cmd)", "def save(self):\n\n # TODO:Find place to save data, write logic to save images(Filter out video?)", "def write_surface_info(surface_info):\n with open(os.path.join(PLUGIN_DIR, 'surface_info.json'), 'w') as outfile:\n json.dump(surface_info, outfile)", "def export(self, file: TextIO) -> None:\n file.write(f'\"{self.name}\"\\n\\t{{\\n')\n file.write(f'\\tchannel {self.channel}\\n')\n file.write(f'\\tsoundlevel {join_float(self.level)}\\n')\n\n if self.volume != (1, 1):\n file.write(f'\\tvolume {join_float(self.volume)}\\n')\n if self.pitch != (100, 100):\n file.write(f'\\tpitch {join_float(self.pitch)}\\n')\n\n if len(self.sounds) != 1:\n file.write('\\trndwave\\n\\t\\t{\\n')\n for wav in self.sounds:\n file.write(f'\\t\\twave \"{wav}\"\\n')\n file.write('\\t\\t}\\n')\n else:\n file.write(f'\\twave 
\"{self.sounds[0]}\"\\n')\n\n if self.force_v2 or self.stack_start or self.stack_stop or self.stack_update:\n file.write(\n '\\t' 'soundentry_version 2\\n'\n '\\t' 'operator_stacks\\n'\n '\\t\\t' '{\\n'\n )\n if self.stack_start:\n file.write(\n '\\t\\t' 'start_stack\\n'\n '\\t\\t\\t' '{\\n'\n )\n for prop in self.stack_start:\n for line in prop.export():\n file.write('\\t\\t\\t' + line)\n file.write('\\t\\t\\t}\\n')\n if self.stack_update:\n file.write(\n '\\t\\t' 'update_stack\\n'\n '\\t\\t\\t' '{\\n'\n )\n for prop in self.stack_update:\n for line in prop.export():\n file.write('\\t\\t\\t' + line)\n file.write('\\t\\t\\t}\\n')\n if self.stack_stop:\n file.write(\n '\\t\\t' 'stop_stack\\n'\n '\\t\\t\\t' '{\\n'\n )\n for prop in self.stack_stop:\n for line in prop.export():\n file.write('\\t\\t\\t' + line)\n file.write('\\t\\t\\t}\\n')\n file.write('\\t\\t}\\n')\n file.write('\\t}\\n')", "def write(self, pathname='wind.png'):\r\n cv2.imwrite(pathname, self.matrix * 255)", "def save(self, filename):\n pass", "def _toFile(self):\n pass", "def write_graph_ui(self):\n filename = input('enter name of the file to save to: ')\n write_graph(self._graph, filename)", "def export(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net') #+ name)\n export_path = os.path.join(self.configuration['export_path'], 'exported_net_{}.pth'.format(name))\n batch_fixed = self.input[:,1,:,:,:]\n batch_moving = self.input[:,2,:,:,:]\n traced_script_module = torch.jit.trace(net, (batch_moving, batch_fixed))\n traced_script_module.save(export_path)", "def dump_data(self,filename,dump_id):\n # get pure data copy\n data = [ d.get_pure_data_copy() for d in self.plotter.data ]\n # full file name of the file with manipulator dump\n filename=tdc_Filenames.get_full_vis_filename(dump_id, filename+'.pickle')\n pickle.dump( data, open(filename,'w') )\n print '\\nContent dumped in \"%s\" \\n' % filename", "def output(self):\n return {\n \"action\": \"RunScene\",\n \"arguments\": [\n {\n \"name\": \"SceneNum\", \n \"value\": self.id\n }\n ], \n \"service\": \"urn:micasaverde-com:serviceId:HomeAutomationGateway1\"\n }", "def save(self, path):\n torch.save({\"epoch\": self.epoch,\n \"state_dict\": self.state_dict()}, path)\n self.logger.info(\"Saved controller network to %s\", path)", "def save(self, filename):\n pass", "def dump(self):\n if self.data_path.exists():\n raise ValueError(f'Invalid path - it must not exist: {self.data_path}')\n self.data_path.parent.mkdir(parents=True, exist_ok=True)\n\n import json\n\n data = {path.name: is_played for movie, path, is_played in self.iter_movies()}\n log.info(f'{self.lp.save} played data for {len(data)} movies to {self.data_path.as_posix()}')\n if not self.dry_run:\n with self.data_path.open('w') as f:\n json.dump(data, f, indent=4, sort_keys=True)", "def save_spi3d(self):\n lut = self.generate_lut()\n file_path = os.path.join(self.output, self.name)\n file_io.save_file(lut, file_path)", "def save(self, filename = 'array_zest', path = '/home/eric/dev/insitu/data/zs_recovery/'):\n filename = filename# + '_Lx_' + str(self.Lx) + 'm_Ly_' + str(self.Ly) + 'm'\n self.path_filename = path + filename + '.pkl'\n f = open(self.path_filename, 'wb')\n pickle.dump(self.__dict__, f, 2)\n f.close()", "def write_snapshot(self):\n json.dump(self.snapshot, open(paths.RESULTS_FILE, 'w'), indent=4, sort_keys=True)" ]
[ "0.7110283", "0.6522757", "0.6518129", "0.6316745", "0.62322736", "0.6161624", "0.6119347", "0.6053778", "0.6050798", "0.6048917", "0.6039331", "0.60387284", "0.603646", "0.6003288", "0.59928995", "0.59725803", "0.5968473", "0.5963264", "0.593944", "0.5870665", "0.5853435", "0.58410716", "0.58271396", "0.582672", "0.57884085", "0.5754646", "0.57494867", "0.57455456", "0.5742013", "0.5725708", "0.5722168", "0.57151675", "0.56961936", "0.5693089", "0.56733733", "0.5665449", "0.56276846", "0.5621257", "0.5620059", "0.5611944", "0.5611517", "0.55971706", "0.55959123", "0.55931586", "0.55911535", "0.55818605", "0.5581671", "0.5573089", "0.55572855", "0.55431944", "0.55405176", "0.55405176", "0.55082273", "0.54992783", "0.54958266", "0.54897845", "0.5489153", "0.54831105", "0.54762805", "0.5474975", "0.5468968", "0.5468651", "0.54683286", "0.5462692", "0.54516006", "0.5438191", "0.5438033", "0.54376185", "0.54376185", "0.54310817", "0.5428991", "0.5428859", "0.5426245", "0.5425072", "0.5424939", "0.5420609", "0.5420109", "0.5419364", "0.5408438", "0.54068", "0.5404694", "0.5404526", "0.5404198", "0.54001105", "0.53972906", "0.5397268", "0.5393368", "0.53897226", "0.5385303", "0.53806204", "0.5379235", "0.53750366", "0.53685087", "0.5358111", "0.5336053", "0.53311527", "0.533038", "0.53269947", "0.5326224", "0.5323349", "0.53196716" ]
0.0
-1
export master layer settings so we can re-apply them
def exportMasterLayerSettings(self):
    master = rlayer.RenderLayer( 'defaultRenderLayer' )
    master.makeCurrent()
    masterData = {}
    nodes = ['defaultArnoldRenderOptions','defaultResolution','defaultRenderGlobals']
    mnNodes = [ mn.Node( n ) for n in nodes ]
    for n in mnNodes:
        for a in n.listAttr( se = True, v = True, w = True ):
            try:
                masterData[a] = a.v
            except:
                continue
    pickle.dump( masterData, open( self.masterPath.path, "wb" ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importMasterSettings(self):\n\t\tpickleData = pickle.load( open( self.masterPath.path, \"rb\" ) )\n\t\tmaster = rlayer.RenderLayer( 'defaultRenderLayer' )\n\t\tmaster.makeCurrent()\n\t\tfor a in pickleData.keys():\n\t\t\ttry:\n\t\t\t\ta.v = pickleData[a]\n\t\t\texcept:\n\t\t\t\tcontinue", "def restore_export_preset():\n run_mel_command(\"FBXResetExport\")", "def exportData(self):\n\t\tlays = rlayer.renderlayers()\n\t\tdata = {}\n\t\tfor l in lays:\n\t\t\tif l.name == 'defaultRenderLayer':\n\t\t\t\tcontinue\n\t\t\tdata[l.name] = {'objects':l.objects, # OBJECTS IN LAYER\n\t\t\t\t\t\t\t'values' :l.overridesWithValues, # OVERRIDED ATTRIBUTES ONLY CHANGED VALUES\n\t\t\t\t\t\t\t'conns' :l.overridesWithConnections[0], # OVERRIDED ATTRIBUTES CHANGED CONNECTIONS\n\t\t\t\t\t\t\t'shader' :l.overridedShader # OVERRIDE RENDERLAYER SHADER\n\t\t\t\t\t\t\t}\n\t\tpickle.dump( data, open( self.dataPath.path, \"wb\" ) )", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def configureMaster(self):\n\t\t\n\t\tfin = open('/opt/google/earth/free/drivers.ini', 'r')\n\t\tfout = open('/etc/X11/ge-drivers.ini', 'w')\n\t\t\n\t\tfor line in fin.readlines():\n\t\t\tfout.write(line)\n\t\t\tif line.find('SETTINGS {') != 0:\n\t\t\t\tcontinue\n\t\t\tfout.write('\\tViewSync/send = true\\n')\n\t\t\tfout.write('\\tViewSync/receive = false\\n')\n\n\t\t\tfout.write('\\tViewSync/hostname = %s\\n' %\n\t\t\t\t self.db.getHostAttr('localhost',\n\t\t\t\t\t\t 'Kickstart_PrivateBroadcast'))\n fout.write('\\tViewSync/port = 21567\\n')\n\t\t\tfout.write('\\n')\n\t\t\tfout.write('\\tViewSync/horizFov = 60\\n')\n fout.write('\\tViewSync/rollOffset = 0\\n')\n fout.write('\\tViewSync/yawOffset = 0\\n')\n\t\t\tfout.write('\\tViewSync/pitchOffset = 0\\n')\n\t\t\tfout.write('\\n')\n\n\n\t\tfin.close()\n\t\tfout.close()\n\n\t\tshutil.copy('/etc/X11/ge-drivers.ini', '/opt/google/earth/free/drivers.ini')", "def ExtractInfoAndCopyMaster(self):\n self.ExtractandWriteInfo()\n self.CreateMasterCopy()\n return \"TurnOffMirror\"", "def save_layer(index, settings) -> Action:\n return {\n \"kind\": SAVE_LAYER,\n \"payload\": {\"index\": index, \"settings\": settings},\n }", "def save_config():\n # Order the load flags using load_keys...\n od_load_flags = OrderedDict()\n for k in load_keys:\n od_load_flags[k] = load_flags[k]\n pawstools.save_cfg(od_load_flags,cfg_file)", "def saveToolSettings(*args, **kwargs)->None:\n pass", "def save_switch_configs(self):", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def export_configurations():\n pass", "def reset_cfg():\n _C.merge_from_other_cfg(_CFG_DEFAULT)", "def _save(self):\n\n out_dict = {}\n out_dict[\"version\"] = pyfx.__version__\n out_dict[\"name\"] = self._name\n out_dict[\"src\"] = self._src\n\n # Write out the background file as an image\n bg_file = os.path.join(self._name,\"master_bg_image.png\")\n pyfx.util.to_file(self._bg_frame,bg_file)\n out_dict[\"bg_frame\"] = bg_file\n\n f = open(os.path.join(self._name,\"pyfx.json\"),\"w\")\n json.dump(out_dict,f)\n f.close()", "def save_cfg(self, output_dir):\n output_path = os.path.join(output_dir, 'level_config.cfg')\n shutil.copy(self.cfg_path, output_path)", "def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = 
self.config[\"Settings\"]\r\n settings[\"datapath\"] = self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)", "def loadLayerSettings(self):\r\n # Get layer attributes\r\n provider = self.layer.dataProvider()\r\n \r\n if not provider.isValid():\r\n logging.getLogger(type(self).__name__).error('invalid layer')\r\n return\r\n \r\n attributes = []\r\n numericAttributes = []\r\n \r\n for field in provider.fields():\r\n attributes.append(field.name())\r\n fieldType = field.type()\r\n if fieldType == QtCore.QVariant.Int or fieldType == QtCore.QVariant.Double:\r\n numericAttributes.append(field.name())\r\n \r\n self.comboBoxLayerAttribute.clear()\r\n self.comboBoxLayerAttribute.addItems(sorted(attributes))\r\n self.comboBoxLayerAttribute.setEnabled(True)\r\n \r\n self.comboBoxStyleCategorizedAttribute.clear()\r\n self.comboBoxStyleCategorizedAttribute.addItems(sorted(attributes))\r\n self.comboBoxStyleCategorizedAttribute.setEnabled(True)\r\n \r\n # Disable graduated style tab if there are no numeric attributes\r\n if numericAttributes:\r\n self.comboBoxStyleGraduatedAttribute.clear()\r\n self.comboBoxStyleGraduatedAttribute.addItems(sorted(numericAttributes))\r\n self.comboBoxStyleGraduatedAttribute.setEnabled(True)\r\n else:\r\n self.tabStyleGraduated.setDisabled(True)\r\n \r\n # Get layer transparency setting\r\n self.sliderLayerTransparency.setValue(self.layer.layerTransparency())\r\n self.spinBoxLayerTransparency.setValue(self.layer.layerTransparency())\r\n \r\n # Get layer symbol fill color\r\n symbols = self.layer.rendererV2().symbols()\r\n self.layerSymbolFillColor = self.styleCategorizedColor = self.styleGraduatedColor = self.styleRuleBasedColor = symbols[0].color()\r\n \r\n # Load layer renderer settings\r\n renderer = self.layer.rendererV2()\r\n \r\n if isinstance(renderer, QgsSingleSymbolRendererV2):\r\n symbols = renderer.symbols()\r\n self.layerSymbolFillColor = symbols[0].color()\r\n self.buttonLayerSymbolFillColor.setStyleSheet('background-color: {0};'.format(self.layerSymbolFillColor.name()))\r\n elif isinstance(renderer, QgsCategorizedSymbolRendererV2):\r\n categories = renderer.categories()\r\n for category in categories:\r\n color = category.symbol().color()\r\n value = str(category.value())\r\n label = category.label()\r\n self.addStyleCategorized(color, value, label)\r\n self.styleCategorizedColor = color\r\n self.buttonStyleCategorizedFillColor.setStyleSheet('background-color: {0};'.format(self.styleCategorizedColor.name()))\r\n attribute = renderer.classAttribute()\r\n self.comboBoxStyleCategorizedAttribute.setCurrentIndex(self.comboBoxStyleCategorizedAttribute.findText(attribute))\r\n self.comboBoxStyleType.setCurrentIndex(self.comboBoxStyleType.findText('Categorized'))\r\n elif isinstance(renderer, QgsGraduatedSymbolRendererV2):\r\n ranges = renderer.ranges()\r\n for range in ranges:\r\n color = range.symbol().color()\r\n lowerValue = range.lowerValue()\r\n upperValue = range.upperValue()\r\n label = range.label()\r\n self.addStyleGraduated(color, lowerValue, upperValue, label)\r\n self.styleGraduatedColor = color\r\n self.buttonStyleGraduatedFillColor.setStyleSheet('background-color: {0};'.format(self.styleGraduatedColor.name()))\r\n attribute = renderer.classAttribute()\r\n 
self.comboBoxStyleGraduatedAttribute.setCurrentIndex(self.comboBoxStyleGraduatedAttribute.findText(attribute))\r\n self.comboBoxStyleType.setCurrentIndex(self.comboBoxStyleType.findText('Graduated'))\r\n elif isinstance(renderer, QgsRuleBasedRendererV2):\r\n rootRule = renderer.rootRule()\r\n rules = rootRule.children()\r\n for aRule in rules:\r\n color = aRule.symbol().color()\r\n rule = aRule.filterExpression()\r\n label = aRule.label()\r\n minScale = aRule.scaleMinDenom()\r\n maxScale = aRule.scaleMaxDenom()\r\n self.addStyleRuleBased(color, rule, minScale, maxScale, label)\r\n self.styleRuleBasedColor = color\r\n self.buttonStyleRuleBasedFillColor.setStyleSheet('background-color: {0};'.format(self.styleRuleBasedColor.name()))\r\n self.comboBoxStyleType.setCurrentIndex(self.comboBoxStyleType.findText('Rule-based'))\r\n \r\n # Get layer label settings\r\n self.p = QgsPalLayerSettings()\r\n self.p.readFromLayer(self.layer)\r\n \r\n if self.p.enabled:\r\n self.checkBoxLayerLabelEnabled.setChecked(True)\r\n self.comboBoxLayerAttribute.setCurrentIndex(self.comboBoxLayerAttribute.findText(self.p.fieldName))\r\n self.spinBoxLabelSize.setValue(self.p.textFont.pointSize())\r\n self.labelColor = self.p.textColor\r\n self.buttonLabelColor.setStyleSheet('background-color: {0};'.format(self.labelColor.name()))", "def configure(self):\n if self.three_layer:\n config = self.config\n # remove the continental shelf\n config.set('soma', 'phi', '1e-16')\n config.set('soma', 'shelf_depth', '0.0')", "def tempcontrol_preset_save(self):\n with open(\n self.tempcontrol_presets_path\n + \"{}.json\".format(self.tempcontrol_preset_currentFilename),\n \"w\",\n ) as output:\n output.write(json.dumps(self.tempcontrol_conf))", "def configure_as_preset(self, val):\n if val == True:\n if self.active:\n self._preset_save_raw = self.active.ecc_raw\n self._preset_save_dataset = self.active.ecc_dataset\n self._preset_save_dataset_id = self.active.ecc_dataset_id\n self.active.ecc_raw = None\n self.active.ecc_dataset = None\n self.active.ecc_dataset_id = None\n if self.active._panel:\n self._preset_save_filename = self.active._panel._filename\n self.active._panel._filename = \"\"\n else:\n if self.active:\n self.active.ecc_raw = self._preset_save_raw\n self.active.ecc_dataset = self._preset_save_dataset\n self.active.ecc_dataset_id = self._preset_save_dataset_id\n self._preset_save_raw = None\n self._preset_save_dataset = None\n self._preset_save_dataset_id = None\n if self.active._panel:\n self.active._panel._filename = self._preset_save_filename\n self._preset_save_filename = \"\"", "def saveSettings():\t\n\tglobal settings\n\tfout = open(config_file,'w')\n\tfout.write(json.dumps(settings, sort_keys=True, indent=4))\n\tfout.close()", "def persist_tools_options(self, *args):\n\n\t\t# Panel-wide classic tools options (they are not Gio actions!)\n\t\tself._tools_gsettings.set_int('last-size', self.get_tool_width())\n\t\tself._persist_color(self.get_left_color(), 'last-left-rgba')\n\t\tself._persist_color(self.get_right_color(), 'last-right-rgba')\n\n\t\t# Tool-wide boolean actions\n\t\tfor action_name in self._boolean_actions_from_gsetting:\n\t\t\tkey_name = self._boolean_actions_from_gsetting[action_name]\n\t\t\tself._persist_boolean(action_name, key_name)\n\n\t\t# Tool-wide \"enum\" actions\n\t\tfor action_name in self._string_actions_from_gsetting:\n\t\t\tkey_name = self._string_actions_from_gsetting[action_name]\n\t\t\tself._persist_string(action_name, key_name)", "def copySettings(self):\n\n networkNode = 
self.returnNetworkNode\n attrs = cmds.listAttr(networkNode, ud=True, hd=True)\n\n attrData = []\n for attr in attrs:\n value = cmds.getAttr(networkNode + \".\" + attr)\n attrData.append([attr, value])\n\n # write out attrData to a temp file\n tempDir = cmds.internalVar(userTmpDir=True)\n clipboardFile = os.path.normcase(os.path.join(tempDir, \"ART_clipboard.txt\"))\n\n f = open(clipboardFile, 'w')\n\n # dump the data with json\n json.dump(attrData, f)\n f.close()", "def settings(args):\n data = {}\n data['train_x'] = load_pkl(os.path.join(args.data_dir, 'train_images.pkl'))\n data['train_y'] = load_pkl(os.path.join(args.data_dir, 'train_labels.pkl'))\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'valid_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, 'valid_labels.pkl'))\n if args.combine_train_val:\n data['train_x'].update(data['valid_x'])\n data['train_y'].update(data['valid_y'])\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'test_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, './data/bsd_pkl_float/test_labels.pkl'))\n args.display_step = len(data['train_x']) / 46\n # Default configuration\n if args.default_settings:\n args.n_epochs = 250\n args.batch_size = 10\n args.learning_rate = 3e-2\n args.std_mult = 0.8\n args.delay = 8\n args.filter_gain = 2\n args.filter_size = 5\n args.n_rings = 4\n args.n_filters = 7\n args.save_step = 5\n args.height = 321\n args.width = 481\n\n args.n_channels = 3\n args.lr_div = 10.\n args.augment = True\n args.sparsity = True\n\n args.test_path = args.save_name\n args.log_path = './logs'\n args.checkpoint_path = './checkpoints'\n\n make_dirs(args, args.test_path)\n make_dirs(args, args.log_path)\n make_dirs(args, args.checkpoint_path)\n\n return args, data", "def get_save_data(self):\n data = super().get_save_data()\n data['palette'] = self.palette\n data['levels'] = self.levels\n return data", "def clear_layers_name():\n set_keep['_layers_name_list'] =[]", "def export_layers(self, dest, show):\n doc = copy.deepcopy(self.document)\n for layer in doc.xpath('//svg:g[@inkscape:groupmode=\"layer\"]', namespaces=inkex.NSS):\n layer.attrib['style'] = 'display:none'\n id = layer.attrib[\"id\"]\n if id in show:\n layer.attrib['style'] = 'display:inline'\n\n doc.write(dest)", "def _augment_pipeline_cfg(self):", "def get_config(self):\n layer_config = {\n \"anchors\": self._anchors, \n \"classes\": self._classes,\n \"ignore_thresh\": self._ignore_thresh, \n \"truth_thresh\": self._truth_thresh, \n \"iou_thresh\": self._iou_thresh, \n \"loss_type\": self._loss_type, \n \"iou_normalizer\": self._iou_normalizer,\n \"cls_normalizer\": self._cls_normalizer, \n \"scale_x_y\": self._scale_x_y, \n }\n layer_config.update(super().get_config())\n return layer_config", "def save_settings():\n\n dont_save = ['VISIONEGG_CONFIG_FILE',\n 'VISIONEGG_SYSTEM_DIR',\n 'VISIONEGG_USER_DIR',\n ]\n\n if not VisionEgg.config.VISIONEGG_CONFIG_FILE:\n raise RuntimeError(\"No config file in use.\")\n re_setting_finder = re.compile(r\"^\\s?((?:VISIONEGG_[A-Z_]*)|(?:SYNCLYNC_[A-Z_]*))\\s?=\\s?(\\S*)\\s?$\",re.IGNORECASE)\n\n orig_file = open(VisionEgg.config.VISIONEGG_CONFIG_FILE,\"r\")\n orig_lines = orig_file.readlines()\n\n line_ending = orig_lines[0][-2:]\n if line_ending[0] not in ['\\r','\\n','\\l']:\n line_ending = line_ending[1]\n\n out_file_lines = []\n\n saved_config_vars = []\n\n for line in orig_lines:\n out_line = line # The output is the same as the input unless there's a match\n match = 
re_setting_finder.match(line)\n if match:\n name = match.group(1).upper()\n if name in VisionEgg.config.__dict__.keys():\n if name not in dont_save:\n # Change the output line\n out_line = (\"%s = %s\"%(name,getattr(VisionEgg.config,name,))) + line_ending\n saved_config_vars.append(name)\n out_file_lines.append(out_line)\n\n # Close and reopen orig_file in write mode\n orig_file.close()\n orig_file = open(VisionEgg.config.VISIONEGG_CONFIG_FILE,\"w\")\n for line in out_file_lines:\n orig_file.write(line)", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def export_all_components(self) -> None:\n logging.message('Exporting current config.')\n for c in COMPONENT_MAP:\n self.export_components(c['file'], c['type'])\n\n aliases.export_aliases()\n export_groups(self)", "def copy_config(cfg):\n res= dict(cfg)\n #model_param = dict(cfg['model_param'])\n model_param = dict(cfg.get('model_param', {}))\n res['model_param'] = model_param\n return res", "def save_cmake_config(self):\n # backup the original configuration set in setup.py\n self._init_config = dict(\n config=self.config,\n generator=self.generator,\n parallel=self.parallel,\n configure_args=self.configure_args,\n build_args=self.build_args,\n install_args=self.install_args,\n )", "def copy_marvin_config(self):\n print(\"==> Making local copy of Marvin Config file\")\n marvin_filename = self.marvin_config.split('/')[-1]\n open(marvin_filename, \"w\").write(json.dumps(self.config, indent=4))", "def writeConfig(self):\n targetFile = \"%s/%s\" % (self.workingDir, self.merge_pset_file)\n handle = open(targetFile, 'w')\n handle.write(self.mergeConfig())\n handle.close()\n return", "def writeCameraSettings(self):\n pass", "def GetDefaultLayerProperties():\r\n pass", "def apply_settings():\n\n scs_globals = _get_scs_globals()\n\n # avoid recursion if another apply settings is running already\n if scs_globals.config_update_lock:\n return False\n\n # NOTE: save file paths in extra variables and apply them on the end\n # to make sure all of the settings are loaded first.\n # This is needed as some libraries reading are driven by other values from config file.\n # For example: \"use_infixed\"\n scs_project_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.scs_project_path, scs_globals)\n shader_presets_filepath = _property_utils.get_by_type(bpy.types.GlobalSCSProps.shader_presets_filepath, scs_globals)\n trigger_actions_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.trigger_actions_rel_path, scs_globals)\n sign_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.sign_library_rel_path, scs_globals)\n tsem_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.tsem_library_rel_path, scs_globals)\n traffic_rules_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.traffic_rules_library_rel_path, scs_globals)\n 
hookup_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.hookup_library_rel_path, scs_globals)\n matsubs_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.matsubs_library_rel_path, scs_globals)\n sun_profiles_library_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.sun_profiles_lib_path, scs_globals)\n conv_hlpr_converters_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.conv_hlpr_converters_path, scs_globals)\n\n # NOTE: as dump level is written in same section as config type\n # applying it directly might take place before we get information about config type\n # so it has to be saved into variable and applied only if global settings are loaded from config file\n dump_level = scs_globals.dump_level\n\n # lock update now, as we don't want any properties update functions to trigger rewrite of config file\n # which would lead to unwanted recursion\n engage_config_lock()\n\n config_container = _pix.get_data_from_file(get_config_filepath(), \" \")\n\n # avoid applying process of config if not present (most probably permission problems on config creation)\n if config_container is not None:\n\n settings_file_valid = 0\n for section in config_container:\n if settings_file_valid == 2:\n if section.type == \"Paths\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"ProjectPath\":\n scs_project_path = prop[1]\n elif prop[0] == \"ShaderPresetsFilePath\":\n shader_presets_filepath = prop[1]\n elif prop[0] == \"TriggerActionsRelFilePath\":\n trigger_actions_rel_path = prop[1]\n elif prop[0] == \"TriggerActionsUseInfixed\":\n scs_globals.trigger_actions_use_infixed = prop[1]\n elif prop[0] == \"SignRelFilePath\":\n sign_library_rel_path = prop[1]\n elif prop[0] == \"SignUseInfixed\":\n scs_globals.sign_library_use_infixed = prop[1]\n elif prop[0] == \"TSemProfileRelFilePath\":\n tsem_library_rel_path = prop[1]\n elif prop[0] == \"TSemProfileUseInfixed\":\n scs_globals.tsem_library_use_infixed = prop[1]\n elif prop[0] == \"TrafficRulesRelFilePath\":\n traffic_rules_library_rel_path = prop[1]\n elif prop[0] == \"TrafficRulesUseInfixed\":\n scs_globals.traffic_rules_library_use_infixed = prop[1]\n elif prop[0] == \"HookupRelDirPath\":\n hookup_library_rel_path = prop[1]\n elif prop[0] == \"MatSubsRelFilePath\":\n matsubs_library_rel_path = prop[1]\n elif prop[0] == \"SunProfilesFilePath\":\n sun_profiles_library_path = prop[1]\n elif prop[0] == \"ConvertersPath\":\n conv_hlpr_converters_path = prop[1]\n elif prop[0] == \"UseAlternativeBases\":\n scs_globals.use_alternative_bases = prop[1]\n else:\n lprint('W Unrecognised item \"%s\" has been found in setting file! 
Skipping...', (str(prop[0]),))\n elif section.type == \"Import\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"ImportScale\":\n scs_globals.import_scale = float(prop[1])\n elif prop[0] == \"PreservePathForExport\":\n scs_globals.import_preserve_path_for_export = prop[1]\n elif prop[0] == \"ImportPimFile\":\n scs_globals.import_pim_file = prop[1]\n elif prop[0] == \"UseWelding\":\n scs_globals.import_use_welding = prop[1]\n elif prop[0] == \"WeldingPrecision\":\n scs_globals.import_welding_precision = prop[1]\n elif prop[0] == \"UseNormals\":\n scs_globals.import_use_normals = prop[1]\n elif prop[0] == \"ImportPitFile\":\n scs_globals.import_pit_file = prop[1]\n elif prop[0] == \"LoadTextures\":\n scs_globals.import_load_textures = prop[1]\n elif prop[0] == \"ImportPicFile\":\n scs_globals.import_pic_file = prop[1]\n elif prop[0] == \"ImportPipFile\":\n scs_globals.import_pip_file = prop[1]\n elif prop[0] == \"ImportPisFile\":\n scs_globals.import_pis_file = prop[1]\n elif prop[0] == \"ConnectedBones\":\n scs_globals.import_connected_bones = prop[1]\n elif prop[0] == \"BoneImportScale\":\n scs_globals.import_bone_scale = float(prop[1])\n elif prop[0] == \"ImportPiaFile\":\n scs_globals.import_pia_file = prop[1]\n elif prop[0] == \"IncludeSubdirsForPia\":\n scs_globals.import_include_subdirs_for_pia = prop[1]\n elif section.type == \"Export\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"ExportScale\":\n scs_globals.export_scale = float(prop[1])\n elif prop[0] == \"ApplyModifiers\":\n scs_globals.export_apply_modifiers = prop[1]\n elif prop[0] == \"ExcludeEdgesplit\":\n scs_globals.export_exclude_edgesplit = prop[1]\n elif prop[0] == \"IncludeEdgesplit\":\n scs_globals.export_include_edgesplit = prop[1]\n elif prop[0] == \"ActiveUVOnly\":\n scs_globals.export_active_uv_only = prop[1]\n elif prop[0] == \"ExportVertexGroups\":\n scs_globals.export_vertex_groups = prop[1]\n elif prop[0] == \"ExportVertexColor\":\n scs_globals.export_vertex_color = prop[1]\n elif prop[0] == \"ExportVertexColorType\":\n scs_globals.export_vertex_color_type = str(prop[1])\n elif prop[0] == \"ExportVertexColorType7\":\n scs_globals.export_vertex_color_type_7 = str(prop[1])\n elif prop[0] == \"ExportPimFile\":\n scs_globals.export_pim_file = prop[1]\n elif prop[0] == \"OutputType\":\n scs_globals.export_output_type = prop[1]\n elif prop[0] == \"ExportPitFile\":\n scs_globals.export_pit_file = prop[1]\n elif prop[0] == \"ExportPicFile\":\n scs_globals.export_pic_file = prop[1]\n elif prop[0] == \"ExportPipFile\":\n scs_globals.export_pip_file = prop[1]\n elif prop[0] == \"SignExport\":\n scs_globals.export_write_signature = prop[1]\n elif section.type == \"GlobalDisplay\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"DisplayLocators\":\n scs_globals.display_locators = prop[1]\n elif prop[0] == \"LocatorSize\":\n scs_globals.locator_size = float(prop[1])\n elif prop[0] == \"LocatorEmptySize\":\n scs_globals.locator_empty_size = float(prop[1])\n elif prop[0] == \"DisplayConnections\":\n scs_globals.display_connections = prop[1]\n elif prop[0] == \"CurveSegments\":\n scs_globals.curve_segments = prop[1]\n elif prop[0] == \"OptimizedConnsDrawing\":\n scs_globals.optimized_connections_drawing = prop[1]\n elif prop[0] == \"DisplayTextInfo\":\n scs_globals.display_info = prop[1]\n else:\n lprint('W Unrecognised item \"%s\" has been found in setting file! 
Skipping...', (str(prop[0]),))\n elif section.type == \"GlobalColors\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"PrefabLocatorsWire\":\n scs_globals.locator_prefab_wire_color = prop[1]\n elif prop[0] == \"ModelLocatorsWire\":\n scs_globals.locator_model_wire_color = prop[1]\n elif prop[0] == \"ColliderLocatorsWire\":\n scs_globals.locator_coll_wire_color = prop[1]\n elif prop[0] == \"ColliderLocatorsFace\":\n scs_globals.locator_coll_face_color = prop[1]\n elif prop[0] == \"NavigationCurveBase\":\n scs_globals.np_connection_base_color = prop[1]\n elif prop[0] == \"MapLineBase\":\n scs_globals.mp_connection_base_color = prop[1]\n elif prop[0] == \"TriggerLineBase\":\n scs_globals.tp_connection_base_color = prop[1]\n elif prop[0] == \"InfoText\":\n scs_globals.info_text_color = prop[1]\n elif prop[0] == \"BasePaint\":\n scs_globals.base_paint_color = prop[1]\n else:\n lprint('W Unrecognised item \"%s\" has been found in setting file! Skipping...', (str(prop[0]),))\n elif section.type == \"Header\":\n for prop in section.props:\n if prop[0] == \"FormatVersion\":\n if prop[1] == 1:\n settings_file_valid += 1\n elif prop[0] == \"Type\":\n if prop[1] == \"Configuration\":\n settings_file_valid += 1\n elif prop[0] == \"DumpLevel\":\n dump_level = prop[1]\n elif prop[0] == \"ConfigStoragePlace\":\n scs_globals.config_storage_place = prop[1]\n\n # if settings are read directly from blend file,\n # release update lock and don't search/apply any settings further\n if prop[1] == \"BlendFile\":\n settings_file_valid += 1\n\n # as dump level can be read already (it can be placed above config storage place property),\n # reset local variable back to value that was saved with blend file\n dump_level = scs_globals.dump_level\n\n break # to avoid further reading of header properties, so dump_level won't be overwritten unintentionally\n\n scs_globals.dump_level = dump_level\n\n # now as last apply all of the file paths\n # NOTE: applying paths is crucial for libraries\n # (they are reloaded/initiated in property update functions).\n if bpy.app.background: # if blender runs without UI then apply libraries directly as async operator is UI depended\n\n scs_globals.scs_project_path = scs_project_path\n scs_globals.shader_presets_filepath = shader_presets_filepath\n scs_globals.trigger_actions_rel_path = trigger_actions_rel_path\n scs_globals.sign_library_rel_path = sign_library_rel_path\n scs_globals.tsem_library_rel_path = tsem_library_rel_path\n scs_globals.traffic_rules_library_rel_path = traffic_rules_library_rel_path\n scs_globals.hookup_library_rel_path = hookup_library_rel_path\n scs_globals.matsubs_library_rel_path = matsubs_library_rel_path\n scs_globals.sun_profiles_lib_path = sun_profiles_library_path\n scs_globals.conv_hlpr_converters_path = conv_hlpr_converters_path\n\n else: # if blender is started normally use asynchronous operator to reload libraries\n\n bpy.ops.world.scs_paths_initialization('INVOKE_DEFAULT', paths_list=[\n {\"name\": \"project base path\", \"attr\": \"scs_project_path\", \"path\": scs_project_path},\n {\"name\": \"shader presets\", \"attr\": \"shader_presets_filepath\", \"path\": shader_presets_filepath},\n {\"name\": \"trigger actions library\", \"attr\": \"trigger_actions_rel_path\", \"path\": trigger_actions_rel_path},\n {\"name\": \"sign library\", \"attr\": \"sign_library_rel_path\", \"path\": sign_library_rel_path},\n {\"name\": \"traffic semaphore library\", \"attr\": \"tsem_library_rel_path\", \"path\": 
tsem_library_rel_path},\n {\"name\": \"traffic rules library\", \"attr\": \"traffic_rules_library_rel_path\", \"path\": traffic_rules_library_rel_path},\n {\"name\": \"hookups library\", \"attr\": \"hookup_library_rel_path\", \"path\": hookup_library_rel_path},\n {\"name\": \"material substance library\", \"attr\": \"matsubs_library_rel_path\", \"path\": matsubs_library_rel_path},\n {\"name\": \"sun profiles library\", \"attr\": \"sun_profiles_lib_path\", \"path\": sun_profiles_library_path},\n {\"name\": \"converters file path\", \"attr\": \"conv_hlpr_converters_path\", \"path\": conv_hlpr_converters_path},\n ])\n\n # release lock as properties are applied\n release_config_lock(use_paths_init_callback=not bpy.app.background)\n\n return True", "def export(\n self,\n dest_file: Optional[IO[str]] = None, *,\n inc_version: bool = True,\n minimal: bool = False,\n disp_multiblend: bool = True,\n ) -> Optional[str]:\n if dest_file is None:\n string_buf = io.StringIO()\n dest_file = string_buf\n else:\n string_buf = None\n\n if inc_version:\n # Increment this to indicate the map was modified\n self.map_ver += 1\n\n dest_file.write('versioninfo\\n{\\n')\n dest_file.write(f'\\t\"editorversion\" \"{self.hammer_ver}\"\\n')\n dest_file.write(f'\\t\"editorbuild\" \"{self.hammer_build}\"\\n')\n dest_file.write(f'\\t\"mapversion\" \"{self.map_ver}\"\\n')\n dest_file.write(f'\\t\"formatversion\" \"{self.format_ver}\"\\n')\n dest_file.write('\\t\"prefab\" \"' +\n srctools.bool_as_int(self.is_prefab) + '\"\\n}\\n')\n\n dest_file.write('visgroups\\n{\\n')\n for vis in self.vis_tree:\n vis.export(dest_file, ind='\\t')\n dest_file.write('}\\n')\n\n if not minimal:\n dest_file.write('viewsettings\\n{\\n')\n dest_file.write('\\t\"bSnapToGrid\" \"' +\n srctools.bool_as_int(self.snap_grid) + '\"\\n')\n dest_file.write('\\t\"bShowGrid\" \"' +\n srctools.bool_as_int(self.show_grid) + '\"\\n')\n dest_file.write('\\t\"bShowLogicalGrid\" \"' +\n srctools.bool_as_int(self.show_logic_grid) + '\"\\n')\n dest_file.write(f'\\t\"nGridSpacing\" \"{self.grid_spacing}\"\\n')\n dest_file.write('\\t\"bShow3DGrid\" \"' +\n srctools.bool_as_int(self.show_3d_grid) + '\"\\n}\\n')\n\n # The worldspawn version should always match the global value.\n # Also force the classname, since this will crash if it's different.\n self.spawn['mapversion'] = str(self.map_ver)\n self.spawn['classname'] = 'worldspawn'\n self.spawn.export(dest_file, disp_multiblend=disp_multiblend, _is_worldspawn=True)\n del self.spawn['mapversion']\n\n for ent in self.entities:\n ent.export(dest_file, disp_multiblend=disp_multiblend)\n\n if not minimal:\n dest_file.write('cameras\\n{\\n')\n if len(self.cameras) == 0:\n self.active_cam = -1\n dest_file.write(f'\\t\"activecamera\" \"{self.active_cam}\"\\n')\n for cam in self.cameras:\n cam.export(dest_file, '\\t')\n dest_file.write('}\\n')\n\n dest_file.write('cordons\\n{\\n')\n if len(self.cordons) > 0:\n dest_file.write('\\t\"active\" \"' +\n srctools.bool_as_int(self.cordon_enabled) +\n '\"\\n')\n for cord in self.cordons:\n cord.export(dest_file, '\\t')\n else:\n dest_file.write('\\t\"active\" \"0\"\\n')\n dest_file.write('}\\n')\n\n if self.quickhide_count > 0:\n dest_file.write(\n 'quickhide\\n'\n '{\\n'\n f'\\t\"count\" \"{self.quickhide_count}\"\\n'\n '}\\n'\n )\n\n if string_buf is not None:\n return string_buf.getvalue()\n else:\n return None", "def save_env():\n global vis\n vis.save([vis.env])", "def add_settings_early(self):\n\n # config settings\n config = {\n # some generic settings for every site, to 
point to location of some stuff\n mconst.DEF_SETTINGNAME_pkgdirimps_sitempacks: [pkgdirimp_sitempacks],\n mconst.DEF_SETTINGNAME_controllerroot: pkgdirimp_controllers,\n mconst.DEF_SETTINGNAME_sitefilepath: misc.calc_modulefiledirpath(__file__),\n # should we also load mewlo site installed setuptools plugins\n mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks: True,\n mconst.DEF_SETTINGNAME_replaceshadowpath: '${sitefilepath}/replaceshadow',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # Name of site\n mconst.DEF_SETTINGNAME_sitename: 'Mewlo',\n # Specify where this site serves from\n # these siteurls should not end in / so if you are serving a site at root just use relative of '' and absolute of 'http://sitename.com'\n mconst.DEF_SETTINGNAME_siteurl_relative: '',\n mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080',\n #mconst.DEF_SETTINGNAME_siteurl_relative: '/public/publicity',\n #mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080/public/publicity',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # online status information\n mconst.DEF_SETTINGNAME_isenabled: True,\n mconst.DEF_SETTINGNAME_isonline: True,\n mconst.DEF_SETTINGNAME_offline_mode: 'maintenance',\n mconst.DEF_SETTINGNAME_offline_message: 'We are down for leap-year maintenance; we will be back soon.',\n mconst.DEF_SETTINGNAME_offline_allowadmin: False,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n\n\n # extension pack config -- we need to explicitly enable plugins\n packconfig = {\n 'mouser.mewlotestplug' : {\n 'isenabled': False,\n },\n 'mouser.testpack' : {\n 'isenabled': False,\n },\n 'mewlo.siteaddon.account' : {\n 'isenabled': True,\n },\n 'mewlo.siteaddon.group' : {\n 'isenabled': True,\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_packs, packconfig)\n\n\n # database config\n databaseconfig = {\n 'settings' : {\n 'sqlalchemy_loglevel' : logging.NOTSET,\n #'sqlalchemy_loglevel' : logging.INFO,\n },\n 'default' : {\n 'url' : 'sqlite:///${dbfilepath}/mewlo_testsite1.sqlite',\n #'tablename_prefix': 'mewlo_',\n 'flag_echologging' : False,\n },\n 'mysql_unused' : {\n # Sample configuration for mysql\n 'url' : 'mysql://mewlo_user:mewlo_pass@localhost:3306/mewlo_testsite1',\n 'tablename_prefix': 'mewlo_'\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_database, databaseconfig)\n self.settings.listappend_settings_key(mconst.DEF_SETTINGSEC_make_dirs, '${dbfilepath}')\n\n # email config settings\n mailconfig = {\n # online status information\n 'smtp_host': self.get_configval('mail_smtp_host'),\n 'smtp_login': self.get_configval('mail_smtp_login'),\n 'smtp_port': self.get_configval('mail_smtp_port'),\n 'smtp_mode': self.get_configval('mail_smtp_mode'),\n 'smtp_password': self.get_configval('mail_smtp_password'),\n 'mail_from' : self.get_configval('mail_from'),\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_mail, mailconfig)\n\n\n # account siteaddon settings\n siteaddonconfig = {\n # online status information\n 'registration_mode': 'immediate',\n 'flag_require_email_verified_before_login': False,\n }\n self.settings.merge_settings_key('siteaddon_account', siteaddonconfig)\n\n\n\n # ATTN: UNFINISHED\n # asset mounts config\n if (False):\n assetmountconfig = {\n 'default' : {\n # an internal assetmount just needs a url route\n 'type': 'internal',\n 'routeid': 'static_files',\n },\n 
'external' : {\n 'type': 'external',\n 'filepath': '${mewlofilepath}/public_assets',\n 'urlpath': 'http://127.0.0.1/mewlo/public_assets',\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_asset_mounts, assetmountconfig)\n\n\n\n\n\n #print \"TESTING CONFIG1:\"\n #self.run_configfunc('sayhello',1,2,3)\n #print \"TESTING CONFIG2:\"\n #self.run_allconfigfuncs('sayhello',1,2,3)", "def mpl_to_root(mpl_plot_dict):\n\t\troot_plot_dict = mpl_plot_dict.copy()\n\t\troot_plot_dict.update({\n\t\t\t'plot_modules': ['ExportRoot'],\n\t\t\t'filename': 'combination_ZJet_' + file_label + time.strftime(\"%Y-%m-%d\", now),\n\t\t\t'file_mode': ('RECREATE' if mpl_to_root.first else 'UPDATE'),\n\t\t})\n\t\tmpl_to_root.first = False\n\t\treturn root_plot_dict", "def save_settings(self):\r\n self.QtSettings.beginGroup(\"MainWindow\")\r\n self.QtSettings.setValue(\"geometry\",self.saveGeometry())\r\n self.QtSettings.setValue(\"state\",self.saveState())\r\n self.QtSettings.endGroup()\r\n \r\n #save element content\r\n self.QtSettings.beginGroup(\"Settings\")\r\n pyguitools.gui_save(self.ui,self.QtSettings)\r\n self.QtSettings.endGroup()", "def merge_into_settings(self, settings):\n if not self._meta_dict:\n self._load_from_file()\n\n settings.chat_name = self._meta_dict[DumpMetadata.CHAT_NAME]\n settings.last_message_id = self._meta_dict[DumpMetadata.LAST_MESSAGE_ID]\n settings.exporter = self._meta_dict[DumpMetadata.EXPORTER]", "def TRT_OverlayDigitizationCfg(flags, **kwargs):\n acc = TRT_OverlayDigitizationBasicCfg(flags, **kwargs)\n acc.merge(TRT_OutputCfg(flags))\n return acc", "def export_project_dump(self, key):", "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def set_defaults(self):\n if not self.HAS_DS9: # pragma: no cover\n return\n self.run('frame delete all')\n self.run('wcs degrees')\n if self.disp_parameters['tile']:\n self.run('tile yes')\n else:\n self.run('tile no')\n self.cs = str(self.disp_parameters['lock_image']).lower()\n self.lock()", "def apply_grab_settings(self):\n raise NotImplementedError", "def RPC_DigitizationBasicCfg(flags, **kwargs):\n acc = MuonGeoModelCfg(flags)\n if \"PileUpTools\" not in kwargs:\n PileUpTools = acc.popToolsAndMerge(RPC_DigitizationToolCfg(flags))\n kwargs[\"PileUpTools\"] = PileUpTools\n acc.merge(PileUpToolsCfg(flags, **kwargs))\n return acc", "def _exportNode(self):\n node = self._extractProperties()\n self._logger.info('settings exported.')\n return node", "def save_master(master_directory):\n global MASTER_SHELF\n MASTER_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"MASTER_SHELF\")))\n MASTER_SHELF[\"master\"] = master_directory\n MASTER_SHELF.close()", "def writeSettings(self):\n for i in range(1,N_STATION+1):\n vol = f\"vol{i}\"\n self.settings.setValue(vol,self.param.vol[i-1])\n info = f\"info{i}\"\n self.settings.setValue(info,self.param.info[i-1])\n ip = f\"ip{i}\"\n self.settings.setValue(ip,self.param.ip[i-1])\n muted = f\"muted{i}\"\n self.settings.setValue(muted,self.param.muted[i-1])", "def expandWithoutMutex(self, previousLayer):\n previousLayerProposition = previousLayer.getPropositionLayer()\n \"*** YOUR CODE HERE ***\"", "def _propagate_material_settings(self, bm, layer):\n state = layer.state\n\n # Shade Flags\n if not bm.use_mist:\n state.shadeFlags |= hsGMatState.kShadeNoFog # Dead in CWE\n state.shadeFlags |= hsGMatState.kShadeReallyNoFog\n\n if bm.use_shadeless:\n state.shadeFlags |= hsGMatState.kShadeWhite\n\n # Colors\n layer.ambient = utils.color(bpy.context.scene.world.ambient_color)\n layer.preshade = 
utils.color(bm.diffuse_color)\n layer.runtime = utils.color(bm.diffuse_color)\n layer.specular = utils.color(bm.specular_color)\n\n layer.specularPower = min(100.0, float(bm.specular_hardness))\n layer.LODBias = -1.0 # Seems to be the Plasma default\n\n if bm.emit > 0.0:\n # Use the diffuse colour as the emit, scaled by the emit amount\n # (maximum 2.0, so we'll also scale that by 0.5)\n emit_scale = bm.emit * 0.5\n layer.ambient = hsColorRGBA(bm.diffuse_color.r * emit_scale,\n bm.diffuse_color.g * emit_scale,\n bm.diffuse_color.b * emit_scale,\n 1.0)", "def saveSettings(self):\n self.userFiles.applyData()\n self.userPersonal.applyData()", "def restoreRenderSettings():\n bpy.context.scene.render.engine = cache.values[\"engine\"]\n bpy.context.scene.render.film_transparent = cache.values[\"transparent\"]\n\n bpy.context.scene.render.filepath = cache.values[\"filepath\"]\n bpy.context.scene.render.image_settings.file_format = cache.values[\"format\"]\n bpy.context.scene.render.image_settings.color_mode = cache.values[\"mode\"]\n bpy.context.scene.render.image_settings.color_depth = cache.values[\"depth\"]\n\n bpy.context.scene.render.resolution_x = cache.values[\"resolutionX\"]\n bpy.context.scene.render.resolution_y = cache.values[\"resolutionY\"]\n bpy.context.scene.render.resolution_percentage = cache.values[\"percentage\"]\n bpy.context.scene.render.pixel_aspect_x = cache.values[\"aspectX\"]\n bpy.context.scene.render.pixel_aspect_y = cache.values[\"aspectY\"]\n\n if cache.values[\"world\"]:\n bpy.context.scene.world = cache.values[\"world\"]", "def globalsettings(tree):\n globepath = '.SETTINGS.EXPERIMENT.GLOBAL'\n backpath = globepath + '.BACKGROUND'\n puffpath = globepath + '.GAS_PUFF'\n PSUpath = globepath + '.PSU_VOLTS'\n tree.addNode(globepath)\n tree.addNode(backpath)\n AddNodeWithTag(tree, backpath + ':REF_SHOTNUM', 'NUMERIC', \n 'GLOBE_REFERENCE_SHOTNUM')\n AddNumericWithUnit(tree, backpath + ':PRESSURE','GLOBE_BACKGROUNDPRESSURE',\n 'torr')\n AddNumericWithUnit(tree, backpath + ':CRYOTEMP_V', 'GLOBE_CRYOTEMP_V', 'V')\n AddNumericWithUnit(tree, backpath + ':CRYOTEMP_K', 'GLOBE_CRYOTEMP_K', 'K')\n tree.addNode(backpath + '.CRYOTEMP_K.CALIBRATION')\n AddNodeWithTag(tree, backpath + '.CRYOTEMP_K.CALIBRATION:COMMENT', 'TEXT',\n 'CRYOCALIBRATION_COMMENT')\n AddNumericWithUnit(tree, backpath + '.CRYOTEMP_K.CALIBRATION:X',\n 'CRYOCALIBRATION_X', 'V')\n AddNumericWithUnit(tree, backpath + '.CRYOTEMP_K.CALIBRATION:Y',\n 'CRYOCALIBRATION_Y', 'K')\n \n tree.addNode(puffpath)\n tree.addNode(puffpath + '.TYPE')\n AddNodeWithTag(tree, puffpath + '.TYPE:A', 'TEXT', 'GLOBE_GAS_A_TYPE')\n AddNodeWithTag(tree, puffpath + '.TYPE:B', 'TEXT', 'GLOBE_GAS_B_TYPE')\n AddNodeWithTag(tree, puffpath + '.TYPE:C', 'TEXT', 'GLOBE_GAS_C_TYPE')\n tree.addNode(puffpath + '.PRESSURE')\n AddNumericWithUnit(tree, puffpath + '.PRESSURE:A', 'GLOBE_GAS_A_PRESSURE',\n 'psi')\n AddNumericWithUnit(tree, puffpath + '.PRESSURE:B', 'GLOBE_GAS_B_PRESSURE',\n 'psi')\n AddNumericWithUnit(tree, puffpath + '.PRESSURE:C', 'GLOBE_GAS_C_PRESSURE',\n 'psi')\n tree.addNode(puffpath + '.BOTTLE_P')\n AddNumericWithUnit(tree, puffpath + '.BOTTLE_P:A', 'GLOBE_GAS_A_BOTTLE_P',\n 'psi')\n AddNumericWithUnit(tree, puffpath + '.BOTTLE_P:B', 'GLOBE_GAS_B_BOTTLE_P',\n 'psi')\n AddNumericWithUnit(tree, puffpath + '.BOTTLE_P:C', 'GLOBE_GAS_C_BOTTLE_P',\n 'psi')\n tree.addNode(puffpath + '.DISTRIBUTION')\n AddNodeWithTag(tree, puffpath + '.DISTRIBUTION:INNER', 'TEXT',\n 'GLOBE_INNER_DISTRIB')\n AddNodeWithTag(tree, puffpath + 
'.DISTRIBUTION:MIDDLE', 'TEXT',\n 'GLOBE_MIDDLE_DISTRIB')\n AddNodeWithTag(tree, puffpath + '.DISTRIBUTION:OUTER', 'TEXT',\n 'GLOBE_OUTER_DISTRIB')\n \n tree.addNode(PSUpath)\n tree.addNode(PSUpath + '.BANKS')\n AddNumericWithUnit(tree, PSUpath + '.BANKS:PSU1', 'GLOBE_BANKVOLT_PSU1',\n 'V')\n AddNumericWithUnit(tree, PSUpath + '.BANKS:PSU2', 'GLOBE_BANKVOLT_PSU2',\n 'V')\n AddNumericWithUnit(tree, PSUpath + '.BANKS:PSU3', 'GLOBE_BANKVOLT_PSU3',\n 'V')\n tree.addNode(PSUpath + '.BIAS')\n AddNumericWithUnit(tree, PSUpath + '.BIAS:BIAS1', 'GLOBE_BIASVOLT_BIAS1',\n 'V')\n AddNumericWithUnit(tree, PSUpath + '.BIAS:BIAS2', 'GLOBE_BIASVOLT_BIAS2',\n 'V')\n\n tree.addNode(PSUpath + '.PUFF_PSU')\n tree.addNode(PSUpath + '.PUFF_PSU.PUFF1')\n AddNumericWithUnit(tree, PSUpath + '.PUFF_PSU.PUFF1:CH1', \n 'GLOBE_PUFF_PSU1_VOLT_CH1', 'V')\n AddNumericWithUnit(tree, PSUpath + '.PUFF_PSU.PUFF1:CH2', \n 'GLOBE_PUFF_PSU1_VOLT_CH2', 'V')\n AddNumericWithUnit(tree, PSUpath + '.PUFF_PSU.PUFF1:CH3', \n 'GLOBE_PUFF_PSU1_VOLT_CH3', 'V')", "def clean_master():", "def init_channel_master_config_data():\n config_data = {}\n config_data[\"tabs\"] = []\n config_data[\"tabs_data\"] = {}\n config_data[\"current_tab\"] = 0\n\n return config_data", "def build_saver(self):\n #this is used to restore and save the graph default_saver.restore\n\n default_saver = tf.train.Saver(max_to_keep=3, allow_empty=True)\n self.savers = {self.name: default_saver}", "def save_setup_info(setup):\n grids = Facade.prepare_setup_data(setup)\n setup.subspaces_gridpoints_JSON = grids\n setup.save()", "def update_settings(self):\n settings = {\n \"reference\": self,\n \"draw_tangents\": self.cbDrawTangents.isChecked(),\n }\n if self.cbShowSolarAngle.isChecked():\n settings[\"show_solar_angle\"] = self.cbSolarAngleType.currentText(), self.cbSolarBody.currentText()\n else:\n settings[\"show_solar_angle\"] = None\n\n self.view.set_remote_sensing_appearance(settings)", "def save_to_cfg(self):\n self.syscfg['sem']['mag_px_size_factor'] = str(self.MAG_PX_SIZE_FACTOR)\n self.cfg['sem']['stage_min_x'] = str(self.stage_limits[0])\n self.cfg['sem']['stage_max_x'] = str(self.stage_limits[1])\n self.cfg['sem']['stage_min_y'] = str(self.stage_limits[2])\n self.cfg['sem']['stage_max_y'] = str(self.stage_limits[3])\n self.syscfg['stage']['sem_stage_limits'] = str(self.stage_limits)\n self.syscfg['stage']['sem_motor_speed'] = str(\n [self.motor_speed_x, self.motor_speed_y])\n self.cfg['sem']['motor_speed_x'] = str(self.motor_speed_x)\n self.cfg['sem']['motor_speed_y'] = str(self.motor_speed_y)\n self.cfg['sem']['stage_move_wait_interval'] = str(\n self.stage_move_wait_interval)\n self.cfg['sem']['stage_move_check_interval'] = str(\n self.stage_move_check_interval)\n self.cfg['sem']['eht'] = '{0:.2f}'.format(self.target_eht)\n self.cfg['sem']['beam_current'] = str(int(self.target_beam_current))\n self.cfg['sem']['aperture_size'] = str(self.target_aperture_size)\n self.cfg['sem']['grab_frame_dwell_time'] = str(self.grab_dwell_time)\n self.cfg['sem']['grab_frame_pixel_size'] = '{0:.1f}'.format(\n self.grab_pixel_size)\n self.cfg['sem']['grab_frame_size_selector'] = str(\n self.grab_frame_size_selector)\n self.cfg['sem']['grab_frame_size_xy'] = str(\n self.STORE_RES[self.grab_frame_size_selector])\n self.cfg['sem']['bsd_contrast'] = str(self.bsd_contrast)\n self.cfg['sem']['bsd_brightness'] = str(self.bsd_brightness)\n self.cfg['sem']['bsd_bias'] = str(self.bsd_bias)\n self.cfg['sem']['auto_beam_blank'] = str(self.auto_beam_blank)\n 
self.syscfg['stage']['sem_xy_tolerance'] = str(self.xy_tolerance)\n self.syscfg['stage']['sem_z_tolerance'] = str(self.z_tolerance)\n # Motor diagnostics\n self.syscfg['stage']['sem_xyz_move_counter'] = json.dumps(\n self.total_xyz_move_counter)\n self.syscfg['stage']['sem_slow_xy_move_counter'] = str(\n self.slow_xy_move_counter)\n self.syscfg['stage']['sem_failed_xyz_move_counter'] = json.dumps(\n self.failed_xyz_move_counter)\n # Maintenance moves\n self.syscfg['stage']['sem_use_maintenance_moves'] = str(\n self.use_maintenance_moves)\n self.syscfg['stage']['sem_maintenance_move_interval'] = str(int(\n self.maintenance_move_interval))", "def clear_all_output_settings(self):\n self.general_information = []\n self.object_information = []\n self.camera_information = []\n self.light_information = []\n self.bounding_box_information = []", "def save(self, mfile):\n pickle.dump((self.params, self.layers, self.conv_layers),\n mfile, protocol=pickle.HIGHEST_PROTOCOL)", "def set_export_touchstone(self, activate, export_dir=\"\"):\n settings = []\n if activate:\n settings.append(\"NAME:options\")\n settings.append(\"ExportAfterSolve:=\")\n settings.append(True)\n settings.append(\"ExportDir:=\")\n settings.append(export_dir)\n elif not activate:\n settings.append(\"NAME:options\")\n settings.append(\"ExportAfterSolve:=\")\n settings.append(False)\n self.odesign.DesignOptions(settings, 0)\n return True", "def save_settings(self, plugin_settings, instance_settings):\n instance_settings.set_value(\"output_directory\", self.output_directory)\n instance_settings.set_value(\"labels\", self.labels)\n if self._sub:\n instance_settings.set_value(\"topic_name\", self._sub.name)", "def save_config(self):\n self.config.app_w = self.width()\n self.config.app_h = self.height()\n self.config.splitter = self.splitter.saveState()\n self.config.save()", "def writeShREEKConfig(self, filename):\n self._ShREEKConfig.save(filename)\n return", "def saveObjectMaps(self):\n if self.objectMaps == None: return\n path = os.path.join(self.dir,settings['mosh.modInfos.objectMaps'])\n outDir = os.path.split(path)[0]\n if not os.path.exists(outDir): os.makedirs(outDir)\n cPickle.dump(self.objectMaps,open(path,'wb'),2)", "def RPC_OverlayDigitizationBasicCfg(flags, **kwargs):\n acc = MuonGeoModelCfg(flags)\n if \"DigitizationTool\" not in kwargs:\n tool = acc.popToolsAndMerge(RPC_OverlayDigitizationToolCfg(flags))\n kwargs[\"DigitizationTool\"] = tool\n\n if flags.Concurrency.NumThreads > 0:\n kwargs.setdefault(\"Cardinality\", flags.Concurrency.NumThreads)\n\n # Set common overlay extra inputs\n kwargs.setdefault(\"ExtraInputs\", flags.Overlay.ExtraInputs)\n\n RPC_Digitizer = CompFactory.RPC_Digitizer\n acc.addEventAlgo(RPC_Digitizer(name=\"RPC_OverlayDigitizer\", **kwargs))\n return acc", "def build_ac_export_infos(self):\n if not self.configure_ac_info:\n return\n export_variables = self.configure_ac_info[\"configure_ac\"][\"export_variables\"]\n variables = self.configure_ac_info[\"configure_ac\"][\"variables\"]\n for export_var in export_variables:\n src = variables.get(export_var, dict())\n dest = export_variables.get(export_var, dict())\n if len(src) == 0:\n continue\n\n if len(dest.get(\"defined\", list())) != 0 or len(dest.get(\"undefined\", list())) != 0:\n continue\n\n export_variables[export_var] = src\n\n for preset_export_var in preset_output_variables:\n if preset_export_var in export_variables:\n continue\n\n src = variables.get(preset_export_var, dict())\n if len(src) == 0:\n continue\n\n 
export_variables[preset_export_var] = src", "def fill_export_section():\n section = _SectionData(\"Export\")\n section.props.append((\"ExportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_scale)))\n section.props.append((\"ApplyModifiers\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_apply_modifiers))))\n section.props.append((\"ExcludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_exclude_edgesplit))))\n section.props.append((\"IncludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_include_edgesplit))))\n section.props.append((\"ActiveUVOnly\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_active_uv_only))))\n section.props.append((\"ExportVertexGroups\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_groups))))\n section.props.append((\"ExportVertexColor\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color))))\n section.props.append((\"ExportVertexColorType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type)))\n section.props.append((\"ExportVertexColorType7\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type_7)))\n # section.props.append((\"ExportAnimFile\", info.get_default_prop_value(bpy.types.GlobalSCSProps.export_anim_file)))\n section.props.append((\"ExportPimFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pim_file))))\n section.props.append((\"OutputType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_output_type)))\n section.props.append((\"ExportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pit_file))))\n section.props.append((\"ExportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pic_file))))\n section.props.append((\"ExportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pip_file))))\n section.props.append((\"SignExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_write_signature))))\n return section", "def save_settings(self):\n settings = {'camera': self.comboCamera.currentIndex(),\n 'rotation': self.comboRotation.currentIndex(),\n 'colors': {\n 'min_hue': self.spinMinHue.value(),\n 'max_hue': self.spinMaxHue.value(),\n 'min_saturation': self.spinMinSaturation.value(),\n 'max_saturation': self.spinMaxSaturation.value(),\n 'min_value': self.spinMinValue.value(),\n 'max_value': self.spinMaxValue.value(),\n }, 'diameter': self.spinDiameter.value(),\n 'lifter': self.lineEditLifter.text(),\n 'save_video': self.checkSaveVideo.isChecked()\n }\n settings_file = open('./resources/settings.json', 'w')\n json.dump(settings, settings_file, indent=4)\n settings_file.close()\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Settings saved.', 5000)", "def save(self):\n with open(self.SETTINGS_FILE, 'w') as handle:\n data = dict()\n for (key, value) in self.__dict__.items():\n if not key.startswith('__'):\n data[key] = value\n json.dump(data, handle)", "def cook_config(ext_config_filename):\n mc = base_model_config()\n with open(ext_config_filename, \"r\") as fp:\n ext_mc = edict(json.load(fp, encoding=\"utf8\"))\n for s in ext_mc.keys():\n mc[s] = ext_mc[s]\n # mc.ANCHOR_BOX = set_anchors(mc)\n # print(np.max(np.square(np.array(set_anchors_testing(mc)) - np.array(set_anchors(mc)))))\n # mc.ANCHORS = len(mc.ANCHOR_BOX)\n # H, W, C = _get_output_shape(mc)\n # mc.MODEL_OUTPUT_SHAPE = [H, W, mc.ANCHOR_PER_GRID]\n 
return mc", "def setNOctaveLayers(self, octaveLayers): # real signature unknown; restored from __doc__\n pass", "def redo_settings(self):\r\n cF.redo_settings()", "def save_defaults(self):\n\n pass", "def _copy_extension_settings(self, other):\n other.useExtendedMasterSecret = self.useExtendedMasterSecret\n other.requireExtendedMasterSecret = self.requireExtendedMasterSecret\n other.useExperimentalTackExtension = self.useExperimentalTackExtension\n other.sendFallbackSCSV = self.sendFallbackSCSV\n other.useEncryptThenMAC = self.useEncryptThenMAC\n other.usePaddingExtension = self.usePaddingExtension\n # session tickets\n other.padding_cb = self.padding_cb\n other.ticketKeys = self.ticketKeys\n other.ticketCipher = self.ticketCipher\n other.ticketLifetime = self.ticketLifetime\n other.max_early_data = self.max_early_data\n other.ticket_count = self.ticket_count\n other.record_size_limit = self.record_size_limit", "def _restore_layer_visibility(self, layer_view: ImageLayerView):\n layer_visibility_to_restore = self._layer_visibility_by_name.get(layer_view.name)\n if layer_visibility_to_restore is None:\n layer_visibility_to_restore = self._DEFAULT_LAYER_VISIBILITY_BY_NAME.get(layer_view.name)\n if layer_visibility_to_restore is not None:\n layer_view.visible = layer_visibility_to_restore.visible\n layer_view.opacity = layer_visibility_to_restore.opacity", "def build_settings(self, settings):\n \n settings.add_json_panel(\"Network\", self.config, data=network_json)\n settings.add_json_panel(\"Camera\", self.config, data=camera_json)\n settings.add_json_panel(\"CV\", self.config, data=cv_json)\n settings.add_json_panel(\"Admin\", self.config, data=admin_json)", "def dump_default_config():\n output = \"PythiaPlotter_config.py\"\n log.info(\"Dumping config to %s\", output)\n import pythiaplotter.default_config as dc\n shutil.copy(dc.__file__.replace(\".pyc\", \".py\"), output)", "def make_settings():\n settings = {}\n num_of_rocks = 1\n\n obj = json.load(open('assets/add_mesh_rocks.json'))\n presets = [obj[\"settings\"][\"default\"]] + obj[\"settings\"][\"preset\"]\n\n for preset in presets:\n title = preset[\"title\"]\n # SIZE\n size = preset[\"size\"]\n\n x, y, z = size[\"scale\"]\n if title == \"Default\":\n scale = uniform(float(x[\"lower\"]), float(x[\"upper\"]))\n scale_X = [scale, scale]\n scale_Y = [scale, scale]\n scale_Z = [scale, scale]\n else:\n scale_X = [float(x[\"lower\"]), float(x[\"upper\"])]\n scale_Y = [float(y[\"lower\"]), float(y[\"upper\"])]\n scale_Z = [float(z[\"lower\"]), float(z[\"upper\"])]\n\n x, y, z = size[\"skew\"]\n skew_X = float(x[\"value\"])\n skew_Y = float(y[\"value\"])\n skew_Z = float(z[\"value\"])\n\n scale_fac = ast.literal_eval(size[\"scale_fac\"])\n use_scale_dis = bool(size[\"use_scale_dis\"])\n\n # SHAPE\n shape = preset[\"shape\"]\n\n deform = float(shape[\"deform\"])\n rough = float(shape[\"rough\"])\n detail = float(shape[\"detail\"])\n display_detail = float(shape[\"display_detail\"])\n smooth_fac = float(shape[\"smooth_fac\"])\n smooth_it = float(shape[\"smooth_it\"])\n\n\n # MATERIAL\n material = preset[\"material\"]\n \n mat_enable = bool(material[\"mat_enable\"])\n mat_color = ast.literal_eval(material[\"mat_color\"])\n mat_bright = float(material[\"mat_bright\"])\n mat_rough = float(material[\"mat_rough\"])\n mat_spec = float(material[\"mat_spec\"])\n mat_hard = float(material[\"mat_hard\"])\n mat_use_trans = bool(material[\"mat_use_trans\"])\n mat_alpha = float(material[\"mat_alpha\"])\n mat_cloudy = float(material[\"mat_cloudy\"])\n mat_IOR 
= float(material[\"mat_IOR\"])\n mat_mossy = float(material[\"mat_mossy\"])\n\n # RANDOM\n random = preset[\"random\"]\n\n use_generate = bool(random[\"use_generate\"])\n use_random_seed = bool(random[\"use_random_seed\"])\n user_seed = float(random[\"user_seed\"])\n\n\n settings[title] = [\n context,\n scale_X,\n skew_X,\n scale_Y,\n skew_Y,\n scale_Z,\n skew_Z,\n scale_fac,\n detail,\n display_detail,\n deform,\n rough,\n smooth_fac,\n smooth_it,\n mat_enable,\n mat_color,\n mat_bright,\n mat_rough,\n mat_spec,\n mat_hard,\n mat_use_trans,\n mat_alpha,\n mat_cloudy,\n mat_IOR,\n mat_mossy,\n num_of_rocks,\n user_seed,\n False,\n use_random_seed\n ]\n\n return settings", "def close(self):\n self.maingui.save_config()\n super().close()", "def save_preset(self, filename, options, REQUEST=None):\r\n\r\n # TODO presets.py - save_preset - specify options parameter to specify which entries, configuration, frontends, etc. to include\r\n\r\n raise NotImplementedError", "def addExportLayerToCoreml(builder):\n outputNames = [output.name for output in builder.spec.description.output]\n\n for i, outputName in enumerate(outputNames):\n # formulas: https://github.com/ultralytics/yolov5/issues/471\n builder.add_activation(\n name=f\"sigmoid_{outputName}\",\n non_linearity=\"SIGMOID\",\n input_name=outputName,\n output_name=f\"{outputName}_sigmoid\",\n )\n\n ### Coordinates calculation ###\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2) -> nC = 640 / strides[i]\n builder.add_slice(\n name=f\"slice_coordinates_xy_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_xy\",\n axis=\"width\",\n start_index=0,\n end_index=2,\n )\n # x,y * 2\n builder.add_elementwise(\n name=f\"multiply_xy_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_xy\"],\n output_name=f\"{outputName}_multiplied_xy_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # x,y * 2 - 0.5\n builder.add_elementwise(\n name=f\"subtract_0_5_from_xy_{outputName}\",\n input_names=[f\"{outputName}_multiplied_xy_by_two\"],\n output_name=f\"{outputName}_subtracted_0_5_from_xy\",\n mode=\"ADD\",\n alpha=-0.5,\n )\n grid = make_grid(featureMapDimensions[i], featureMapDimensions[i]).numpy()\n # x,y * 2 - 0.5 + grid[i]\n builder.add_bias(\n name=f\"add_grid_from_xy_{outputName}\",\n input_name=f\"{outputName}_subtracted_0_5_from_xy\",\n output_name=f\"{outputName}_added_grid_xy\",\n b=grid,\n shape_bias=grid.shape,\n )\n # (x,y * 2 - 0.5 + grid[i]) * stride[i]\n builder.add_elementwise(\n name=f\"multiply_xy_by_stride_{outputName}\",\n input_names=[f\"{outputName}_added_grid_xy\"],\n output_name=f\"{outputName}_calculated_xy\",\n mode=\"MULTIPLY\",\n alpha=strides[i],\n )\n\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2)\n builder.add_slice(\n name=f\"slice_coordinates_wh_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_wh\",\n axis=\"width\",\n start_index=2,\n end_index=4,\n )\n # w,h * 2\n builder.add_elementwise(\n name=f\"multiply_wh_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_wh\"],\n output_name=f\"{outputName}_multiplied_wh_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # (w,h * 2) ** 2\n builder.add_unary(\n name=f\"power_wh_{outputName}\",\n input_name=f\"{outputName}_multiplied_wh_by_two\",\n output_name=f\"{outputName}_power_wh\",\n mode=\"power\",\n alpha=2,\n )\n # (w,h * 2) ** 2 * anchor_grid[i]\n anchor = (\n anchorGrid[i]\n .expand(-1, featureMapDimensions[i], 
featureMapDimensions[i], -1)\n .numpy()\n )\n builder.add_load_constant_nd(\n name=f\"anchors_{outputName}\",\n output_name=f\"{outputName}_anchors\",\n constant_value=anchor,\n shape=anchor.shape,\n )\n builder.add_elementwise(\n name=f\"multiply_wh_with_achors_{outputName}\",\n input_names=[f\"{outputName}_power_wh\", f\"{outputName}_anchors\"],\n output_name=f\"{outputName}_calculated_wh\",\n mode=\"MULTIPLY\",\n )\n\n builder.add_concat_nd(\n name=f\"concat_coordinates_{outputName}\",\n input_names=[f\"{outputName}_calculated_xy\", f\"{outputName}_calculated_wh\"],\n output_name=f\"{outputName}_raw_coordinates\",\n axis=-1,\n )\n builder.add_scale(\n name=f\"normalize_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_coordinates\",\n output_name=f\"{outputName}_raw_normalized_coordinates\",\n W=torch.tensor([1 / 640]).numpy(),\n b=0,\n has_bias=False,\n )\n\n ### Confidence calculation ###\n builder.add_slice(\n name=f\"slice_object_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_object_confidence\",\n axis=\"width\",\n start_index=4,\n end_index=5,\n )\n builder.add_slice(\n name=f\"slice_label_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_label_confidence\",\n axis=\"width\",\n start_index=5,\n end_index=0,\n )\n # confidence = object_confidence * label_confidence\n builder.add_multiply_broadcastable(\n name=f\"multiply_object_label_confidence_{outputName}\",\n input_names=[\n f\"{outputName}_label_confidence\",\n f\"{outputName}_object_confidence\",\n ],\n output_name=f\"{outputName}_raw_confidence\",\n )\n\n # input: (1, 3, nC, nC, 85), output: (3 * nc^2, 85)\n builder.add_flatten_to_2d(\n name=f\"flatten_confidence_{outputName}\",\n input_name=f\"{outputName}_raw_confidence\",\n output_name=f\"{outputName}_flatten_raw_confidence\",\n axis=-1,\n )\n builder.add_flatten_to_2d(\n name=f\"flatten_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_normalized_coordinates\",\n output_name=f\"{outputName}_flatten_raw_coordinates\",\n axis=-1,\n )\n\n builder.add_concat_nd(\n name=\"concat_confidence\",\n input_names=[\n f\"{outputName}_flatten_raw_confidence\" for outputName in outputNames\n ],\n output_name=\"raw_confidence\",\n axis=-2,\n )\n builder.add_concat_nd(\n name=\"concat_coordinates\",\n input_names=[\n f\"{outputName}_flatten_raw_coordinates\" for outputName in outputNames\n ],\n output_name=\"raw_coordinates\",\n axis=-2,\n )\n\n builder.set_output(\n output_names=[\"raw_confidence\", \"raw_coordinates\"],\n output_dims=[(25200, numberOfClassLabels), (25200, 4)],\n )", "def saveSettings(self):\n helpers.saveFile(self.dataDir, self.settingsFilename, json.dumps(self.settings))", "def copyout_options(self, mw):\n opts = mw.opts\n opts.xrange.lower = self.xrangemin.value()\n opts.xrange.upper = self.xrangemax.value()\n opts.yrange.lower = self.yrangemin.value()\n opts.yrange.upper = self.yrangemax.value()\n opts.intparams.background.lower = self.bgintmin.value()\n opts.intparams.background.upper = self.bgintmax.value()\n opts.intparams.peak.lower = self.halphamin.value()\n opts.intparams.peak.upper = self.halphamax.value()\n opts.intparams.apply_doppler = self.doppleradj.isChecked()", "def undo_settings(self):\r\n cF.undo_settings()", "def set_defaults(self):\n self.plastic = False\n self.unset_output()\n self.reward = False\n self.patmod = config.impact_modulation_default", "def save_settings(self, param_state):\n with open(CONFIG_DIR / self.name_parameters, 
'wb') as f:\n pickle.dump(param_state, f)", "def savePrefs(*args, colors: bool=True, general: bool=True, hotkeys: bool=True, menuSets:\n bool=True, plugins: bool=True, uiLayout: bool=True, **kwargs)->None:\n pass", "def build_config(self, config):\n config.setdefaults('Makesmith Settings', {'COMport': 'COM5', 'xPitch': 20, 'openFile': \" \"})", "def RPC_OverlayDigitizationToolCfg(flags, name=\"Rpc_OverlayDigitizationTool\", **kwargs):\n kwargs.setdefault(\"OnlyUseContainerName\", False)\n kwargs.setdefault(\"OutputObjectName\", flags.Overlay.SigPrefix + \"RPC_DIGITS\")\n kwargs.setdefault(\"OutputSDOName\", flags.Overlay.SigPrefix + \"RPC_SDO\")\n return RPC_DigitizationToolCommonCfg(flags, name, **kwargs)", "def init_training_settings() -> bytes:\n data = {\"mark_up_source\": \"\", \"mark_up_entities\": [], \"bug_resolution\": []}\n return pickle.dumps(data)", "def wirebomb_config_save(self, filepath):\n scene = self.set_as_active()\n config = configparser.ConfigParser()\n\n config['WIREFRAME TYPE'] = {'wireframe_method': scene.wirebomb.wireframe_method}\n\n config['CHECKBOXES'] = {'cb_backup': scene.wirebomb.cb_backup,\n 'cb_clear_rlayers': scene.wirebomb.cb_clear_rlayers,\n 'cb_clear_materials': scene.wirebomb.cb_clear_materials,\n 'cb_composited': scene.wirebomb.cb_composited,\n 'cb_only_selected': scene.wirebomb.cb_only_selected,\n 'cb_ao': scene.wirebomb.cb_ao,\n 'cb_clay': scene.wirebomb.cb_clay,\n 'cb_clay_only': scene.wirebomb.cb_clay_only,\n 'cb_mat_wire': scene.wirebomb.cb_mat_wire,\n 'cb_mat_clay': scene.wirebomb.cb_mat_clay}\n\n config['COLORS SET'] = {'color_wireframe': list(scene.wirebomb.color_wire),\n 'color_clay': list(scene.wirebomb.color_clay)}\n\n config['MATERIALS SET'] = {'wireframe': scene.wirebomb.material_wire,\n 'clay': scene.wirebomb.material_clay}\n\n config['SLIDERS'] = {'slider_wt_freestyle': scene.wirebomb.slider_wt_freestyle,\n 'slider_wt_modifier': scene.wirebomb.slider_wt_modifier}\n\n config['LAYERS SELECTED'] = {'layers_affected': list(scene.wirebomb.layers_affected),\n 'layers_other': list(scene.wirebomb.layers_other)}\n\n config['SCENE NAME SET'] = {'scene_name_1': scene.wirebomb.scene_name_1}\n\n with open(filepath, 'w') as configfile:\n config.write(configfile)", "def stage_cfg(self, stage_name: str):\n stage_cfg = self._app_cfg['stages'][stage_name]\n global_cfg = self._app_cfg['global']\n\n # Copy all of the global configuration items into the stage config section\n stage_cfg['parameters'].update(**global_cfg)\n return stage_cfg" ]
[ "0.72975564", "0.5729428", "0.56773335", "0.5671888", "0.5636887", "0.5510333", "0.5465108", "0.54372466", "0.54338557", "0.5410532", "0.53921604", "0.53159", "0.52870184", "0.52698493", "0.5225601", "0.5222329", "0.5184534", "0.5180232", "0.5179325", "0.517133", "0.516891", "0.51393145", "0.5114553", "0.5097077", "0.5089205", "0.50671285", "0.504586", "0.5023687", "0.50234926", "0.50170094", "0.50095403", "0.5003398", "0.5003285", "0.49993625", "0.49782953", "0.497468", "0.49698794", "0.49604696", "0.49416152", "0.49405304", "0.49391612", "0.49381736", "0.49350804", "0.492641", "0.4926291", "0.4920583", "0.4907137", "0.49056572", "0.49042964", "0.49033985", "0.48924828", "0.4887848", "0.4886946", "0.4883843", "0.4883702", "0.48765153", "0.48687688", "0.4865427", "0.48561874", "0.48485503", "0.48381427", "0.48374584", "0.48251364", "0.482485", "0.48222697", "0.48197424", "0.48194256", "0.48104227", "0.48094022", "0.48048267", "0.48023644", "0.47976023", "0.47970468", "0.47875157", "0.47868696", "0.47819293", "0.4777665", "0.47712237", "0.47708297", "0.4770667", "0.47680694", "0.47537944", "0.47517735", "0.4749874", "0.4749444", "0.47469082", "0.47451407", "0.47396058", "0.473126", "0.4730604", "0.47289795", "0.4728079", "0.4715361", "0.47152647", "0.47114366", "0.4701717", "0.46937913", "0.4683801", "0.467843", "0.46741223" ]
0.8537631
0
export data from scene, object overrides in render layers, etc.
def exportData(self): lays = rlayer.renderlayers() data = {} for l in lays: if l.name == 'defaultRenderLayer': continue data[l.name] = {'objects':l.objects, # OBJECTS IN LAYER 'values' :l.overridesWithValues, # OVERRIDED ATTRIBUTES ONLY CHANGED VALUES 'conns' :l.overridesWithConnections[0], # OVERRIDED ATTRIBUTES CHANGED CONNECTIONS 'shader' :l.overridedShader # OVERRIDE RENDERLAYER SHADER } pickle.dump( data, open( self.dataPath.path, "wb" ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_scene_data(self, scene, data, tmp_dir):\n pass", "def writeObject(self,view,renderer):\n\n if not view.Source:\n return \"\"\n\n # point light hook\n proxy = getattr(view.Source,\"Proxy\",None)\n if getattr(proxy,\"type\",None) == \"PointLight\":\n return self.writePointLight(view,renderer)\n\n # get color and alpha\n mat = None\n color = None\n alpha = None\n if view.Material:\n mat = view.Material\n else:\n if \"Material\" in view.Source.PropertiesList:\n if view.Source.Material:\n mat = view.Source.Material\n if mat:\n if \"Material\" in mat.PropertiesList:\n if \"DiffuseColor\" in mat.Material:\n color = mat.Material[\"DiffuseColor\"].strip(\"(\").strip(\")\").split(\",\")[:3]\n if \"Transparency\" in mat.Material:\n if float(mat.Material[\"Transparency\"]) > 0:\n alpha = 1.0 - float(mat.Material[\"Transparency\"])\n else:\n alpha = 1.0\n\n if view.Source.ViewObject:\n if not color:\n if hasattr(view.Source.ViewObject,\"ShapeColor\"):\n color = view.Source.ViewObject.ShapeColor[:3]\n if not alpha:\n if hasattr(view.Source.ViewObject,\"Transparency\"):\n if view.Source.ViewObject.Transparency > 0:\n alpha = 1.0-(float(view.Source.ViewObject.Transparency)/100.0)\n if not color:\n color = (1.0, 1.0, 1.0)\n if not alpha:\n alpha = 1.0\n\n # get mesh\n mesh = None\n if hasattr(view.Source,\"Group\"):\n shps = [o.Shape for o in Draft.getGroupContents(view.Source) if hasattr(o,\"Shape\")]\n mesh = MeshPart.meshFromShape(Shape=Part.makeCompound(shps),\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Part::Feature\"):\n mesh = MeshPart.meshFromShape(Shape=view.Source.Shape,\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Mesh::Feature\"):\n mesh = view.Source.Mesh\n if not mesh:\n return \"\"\n\n return renderer.writeObject(view,mesh,color,alpha)", "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def test_to_from_scene(self): # pragma: lpy\n super(TestObjDict, self).test_to_from_scene(_as_obj=True)", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def export( self, captionMode, copyFiles, outputDir ):\n scene = slicer.mrmlScene\n nodes = scene.GetNumberOfNodes()\n\n self.__nodes = {}\n\n # 1 for model name, 2 for parent name\n self.__captionMode = captionMode\n # TRUE if we shall copy the files to the outputDir\n self.__copyFiles = copyFiles\n self.__outputDir = outputDir\n\n self.__tree = Tree()\n self.__tree.create_node( \"Scene\", \"scene\" )\n\n for n in xrange( nodes ):\n\n node = scene.GetNthNode( n )\n\n self.parseNode( node )\n\n [header, footer] = self.configureXrenderers()\n output = header\n output += self.createXtree( \"scene\" )\n output += footer\n\n return output", "def __render_scene(self, scene):\n\n # Name and location of the exported project.\n project_dir = os.path.join(tempfile.gettempdir(), \"blenderseed\", \"render\")\n project_filepath = os.path.join(project_dir, \"render.appleseed\")\n\n # Create 
target directories if necessary.\n if not os.path.exists(project_dir):\n try:\n os.makedirs(project_dir)\n except os.error:\n self.report({\"ERROR\"}, \"The directory {0} could not be created. Check directory permissions.\".format(project_dir))\n return\n\n # Generate project on disk.\n self.update_stats(\"\", \"appleseed Rendering: Exporting Scene\")\n writer = projectwriter.Writer()\n writer.write(scene, project_filepath)\n\n # Render project.\n self.__render_project_file(scene, project_filepath, project_dir)", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: 
vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n 
sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def send_scene_informations(self):\n self.send_player_position()\n self.send_player_direction()\n self.send_grafik_objects()", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def objects_to_bmesh(objs, transform=True):\n\n # CAUTION: Removes/destroys custom layer props\n\n # Creates the mesh used to merge the entire scene\n bm_all = bmesh.new()\n\n # Adds the objects\" meshes to the bmesh\n for obj in objs:\n dprint(\"Preparing object {} for export...\".format(obj.name))\n # Creates a bmesh from the supplied object\n bm = bmesh.new()\n bm.from_mesh(obj.data)\n\n # Makes sure all layers exist so values don't get lost while exporting\n uv_layer = bm.loops.layers.uv.get(\"UVMap\")\n tex_layer = bm.faces.layers.tex.get(\"UVMap\")\n vc_layer = (bm.loops.layers.color.get(\"Col\") or\n bm.loops.layers.color.new(\"Col\"))\n env_layer = (bm.loops.layers.color.get(\"Env\") or\n bm.loops.layers.color.new(\"Env\"))\n env_alpha_layer = (bm.faces.layers.float.get(\"EnvAlpha\") or\n bm.faces.layers.float.new(\"EnvAlpha\"))\n va_layer = (bm.loops.layers.color.get(\"Alpha\") or\n bm.loops.layers.color.new(\"Alpha\"))\n texnum_layer = bm.faces.layers.int.get(\"Texture Number\")\n type_layer = (bm.faces.layers.int.get(\"Type\") or\n bm.faces.layers.int.new(\"Type\"))\n material_layer = (bm.faces.layers.int.get(\"Material\") or\n bm.faces.layers.int.new(\"Material\"))\n\n # Removes the parent for exporting and applies transformation\n parent = obj.parent\n if parent:\n mat = obj.matrix_world.copy()\n old_mat = obj.matrix_basis.copy()\n obj.parent = None\n obj.matrix_world = mat\n\n spc = obj.matrix_basis\n 
bmesh.ops.scale(\n bm,\n vec=obj.scale,\n space=spc,\n verts=bm.verts\n )\n if transform:\n bmesh.ops.transform(\n bm,\n matrix=Matrix.Translation(obj.location),\n space=spc,\n verts=bm.verts\n )\n bmesh.ops.rotate(\n bm,\n cent=obj.location,\n matrix=obj.rotation_euler.to_matrix(),\n space=spc,\n verts=bm.verts\n )\n\n # Restores the parent relationship\n if parent and not obj.parent:\n obj.parent = parent\n obj.matrix_basis = old_mat\n\n # Converts the transformed bmesh to mesh\n new_mesh = bpy.data.meshes.new(\"ncp_export_temp\")\n bm.to_mesh(new_mesh)\n\n # Adds the transformed mesh to the big bmesh\n bm_all.from_mesh(new_mesh)\n\n # Removes unused meshes\n bpy.data.meshes.remove(new_mesh, do_unlink=True)\n bm.free()\n\n return bm_all", "def afterLoadSceneObject(self):\n\t\tpass", "def save_scene(force=True, **kwargs):\n\n pass", "def dump_objects():\n pass", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def read_layout(outFile=None, linked=False, append=False):\n from cgl.plugins.blender.lumbermill import scene_object, LumberObject, import_file\n from cgl.core.utils.read_write import load_json\n import bpy\n\n if outFile == None:\n outFileObject = scene_object().copy(ext='json', task='lay', user='publish').latest_version()\n outFileObject.set_attr(filename='%s_%s_%s.%s' % (outFileObject.seq,\n outFileObject.shot,\n outFileObject.task,\n 'json'\n ))\n outFile = outFileObject.path_root\n # outFile = scene_object().path_root.replace(scene_object().ext, 'json')\n\n\n\n data = load_json(outFile)\n\n for p in data:\n print(p)\n data_path = data[p]['source_path']\n blender_transform = data[p]['blender_transform']\n\n transform_data = []\n for value in blender_transform:\n transform_data.append(value)\n\n print(transform_data)\n\n pathToFile = os.path.join(scene_object().root, data_path)\n lumberObject = LumberObject(pathToFile)\n\n\n\n if lumberObject.filename in bpy.data.libraries:\n lib = bpy.data.libraries[lumberObject.filename]\n bpy.data.batch_remove(ids=([lib]))\n import_file(lumberObject.path_root, linked=linked, append=append)\n else:\n import_file(lumberObject.path_root, linked=linked, append=append)\n\n if p not in bpy.context.collection.objects:\n obj = bpy.data.objects.new(p, None)\n bpy.context.collection.objects.link(obj)\n obj.instance_type = 'COLLECTION'\n obj.instance_collection = bpy.data.collections[lumberObject.asset]\n obj.location = (transform_data[0], transform_data[1], transform_data[2])\n obj.rotation_euler = (transform_data[3], transform_data[4], transform_data[5])\n obj.scale = (transform_data[6], transform_data[7], transform_data[8])\n\n bpy.ops.file.make_paths_relative()", "def loadMultiple(method, *args):\n\n ### Declaring attributes\n selectedCurve = selectedMesh = None\n minRangeX = minRangeY = minRangeZ = maxRangeX = maxRangeY = maxRangeZ = 0\n selectedObjects = []\n\n ### Query UI values\n # Choise between standin / assembly\n selectedRadio = cmds.radioCollection(loadMethodRadio, query=True, select=True)\n # List of all asset icons on UI\n objectIconsList = cmds.layout(objectScroll, query=True, childArray=True)\n # Amount of copies\n buildingAmount = cmds.intSliderGrp(SpawnObjectsTab.BuildingAmount, query=True, value=True)\n # Deviation from original rotation\n rotationVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomRotation, query=True, value=True)\n # Deviation from original scale\n scaleVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomScale, query=True, value=True)\n\n ### 
Iterate over each asset icon\n for obj in objectIconsList:\n\n # Append to list if the asset is selected\n isSelected = cmds.iconTextCheckBox(obj, query=True, value=True)\n\n if isSelected:\n selectedObjects.append(cmds.iconTextCheckBox(obj, query=True, label=True))\n\n # Exit the function if no asset is selected\n if not selectedObjects:\n return\n \n # Reference to the function that will scatter the copies\n scatteringFunction = None\n\n ### The user chose \"curve\"\n if method == \"curve\":\n \n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnCurve\n\n # Get curve reference\n selectedCurve = cmds.ls(selection=True)\n if not selectedCurve:\n return\n selectedCurve = selectedCurve[0]\n\n ### The user chose \"range\"\n if method == \"range\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnRange\n\n # Query minimum values from floatField\n minValues = cmds.floatFieldGrp(SpawnObjectsTab.MinimumField, query=True, value=True)\n minRangeX, minRangeY, minRangeZ = minValues[0], minValues[1], minValues[2]\n # Query maximum values from floatField\n maxValues = cmds.floatFieldGrp(SpawnObjectsTab.MaximumField, query=True, value=True)\n maxRangeX, maxRangeY, maxRangeZ = maxValues[0], maxValues[1], maxValues[2]\n\n ### The user chose \"mesh\"\n if method == \"mesh\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnMesh\n\n # Get reference of selected object\n selectedMesh = cmds.ls(selection=True)\n if not selectedMesh:\n return\n selectedMesh = selectedMesh[0]\n\n # Create group for the spawned copies \n finalGroup = cmds.group(name=\"CurveAssetGroup\", empty=True)\n cmds.select(clear=True)\n\n ### Iterate over the generated positions of the function with given parameters\n # scatteringFunction is a reference to a function in ObjectScattering.py\n # these functions are generators, they yield a value and we can iterate\n # to get the next value generated.\n for position in scatteringFunction(objectCount=buildingAmount, curve=selectedCurve,\n minX=minRangeX, minY=minRangeY, minZ=minRangeZ, maxX=maxRangeX, maxY=maxRangeY, maxZ=maxRangeZ,\n mesh=selectedMesh):\n \n # Randomly instance an asset from the selectedObjects list\n asset = AssetIcon(random.choice(selectedObjects))\n loadedAssetNode = None\n\n # Create copy based on the mode selected by the user\n if \"standin\" in selectedRadio:\n loadedAssetNode = asset.loadArnoldAsset()\n else: \n loadedAssetNode = asset.loadAsset()\n\n # Move this copy to the generated position\n cmds.move(position[0], position[1], position[2], loadedAssetNode, absolute=True)\n\n # If there is a fourth index on the position, that means we have rotation info\n # use that info to rotate the asset.\n # It is used to match an objects rotation to a face normal.\n if len(position) == 4:\n cmds.rotate(position[3][0], position[3][1], position[3][2], loadedAssetNode, absolute=True)\n \n # Add random rotation\n angle = random.uniform(-rotationVariation, rotationVariation)\n cmds.rotate(angle, loadedAssetNode, y=True, relative=True, objectSpace=True)\n\n # Add random scale\n newScale = random.uniform(1, 1+scaleVariation)\n cmds.scale(newScale, newScale, newScale, loadedAssetNode, absolute=True)\n\n #cmds.FreezeTransformations(loadedAssetNode)\n\n # Parent copy to group\n cmds.parent(loadedAssetNode, finalGroup)", "def _setup_scene(self):\n\n scene = bpy.context.scene\n\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n # remove non mesh objects\n for obj in 
scene.objects:\n obj.select = (obj.type != \"MESH\")\n bpy.ops.object.delete()\n\n # empty sequences are false by default\n if scene.objects:\n\n # unlink objects (all meshes) from parents\n bpy.ops.object.select_all()\n bpy.ops.object.parent_clear(type=\"CLEAR_KEEP_TRANSFORM\")\n\n # join all meshes in one single object\n scene.objects.active = bpy.data.objects[0]\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)\n bpy.context.object.name = \"Object\"\n bpy.context.object.dimensions = bpy.context.object.dimensions / max(bpy.context.object.dimensions)\n\n # set the origin of the object to the cursor location\n scene.cursor_location = [0, 0, 0]\n bpy.ops.object.origin_set(type=\"ORIGIN_CURSOR\")\n # bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\", center=\"BOUNDS\")\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n\n if self.add_ground_plane:\n bpy.ops.mesh.primitive_plane_add(radius=10.)\n\n bpy.ops.object.select_all(action=\"DESELECT\")", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] 
= outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def create_scene(self):\n \n self.scene=soya.World()", "def __getitem__(self, index):\n path, name, txt = self.imgs[index]\n img = self.loader(path)\n\n img_size = img.size\n img_size = (400,400)\n\n loader = loadjson\n \n data = loader(txt, self.objectsofinterest,img)\n\n pointsBelief = data['pointsBelief'] \n objects_centroid = data['centroids']\n points_all = data['points']\n points_keypoints = data['keypoints_2d']\n translations = torch.from_numpy(np.array(\n data['translations'])).float()\n rotations = torch.from_numpy(np.array(\n data['rotations'])).float() \n\n if len(points_all) == 0:\n points_all = torch.zeros(1, 10, 2).double()\n \n # self.save == true assumes there is only \n # one object instance in the scene. \n if translations.size()[0] > 1:\n translations = translations[0].unsqueeze(0)\n rotations = rotations[0].unsqueeze(0)\n\n # If there are no objects, still need to return similar shape array\n if len(translations) == 0:\n translations = torch.zeros(1,3).float()\n rotations = torch.zeros(1,4).float()\n\n # Camera intrinsics\n path_cam = path.replace(name,'_camera_settings.json')\n with open(path_cam) as data_file: \n data = json.load(data_file)\n # Assumes one camera\n cam = data['camera_settings'][0]['intrinsic_settings']\n\n matrix_camera = np.zeros((3,3))\n matrix_camera[0,0] = cam['fx']\n matrix_camera[1,1] = cam['fy']\n matrix_camera[0,2] = cam['cx']\n matrix_camera[1,2] = cam['cy']\n matrix_camera[2,2] = 1\n\n # Load the cuboid sizes\n path_set = path.replace(name,'_object_settings.json')\n with open(path_set) as data_file: \n data = json.load(data_file)\n\n cuboid = torch.zeros(1)\n\n if self.objectsofinterest is None:\n cuboid = np.array(data['exported_objects'][0]['cuboid_dimensions'])\n else:\n for info in data[\"exported_objects\"]:\n if self.objectsofinterest in info['class']:\n cuboid = np.array(info['cuboid_dimensions'])\n\n img_original = img.copy() \n\n \n def Reproject(points,tm, rm):\n \"\"\"\n Reprojection of points when rotating the image\n \"\"\"\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid\n\n # Random image manipulation, rotation and translation with zero padding\n dx = round(np.random.normal(0, 2) * float(self.random_translation[0]))\n dy = round(np.random.normal(0, 2) * float(self.random_translation[1]))\n angle = round(np.random.normal(0, 1) * float(self.random_rotation))\n\n tm = np.float32([[1, 0, dx], [0, 1, dy]])\n rm = cv2.getRotationMatrix2D(\n (img.size[0]/2, img.size[1]/2), angle, 1)\n\n for i_objects in range(len(pointsBelief)):\n points = pointsBelief[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n pointsBelief[i_objects] = new_cuboid.tolist()\n objects_centroid[i_objects] = 
tuple(new_cuboid.tolist()[-1])\n pointsBelief[i_objects] = list(map(tuple, pointsBelief[i_objects]))\n\n for i_objects in range(len(points_keypoints)):\n points = points_keypoints[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n points_keypoints[i_objects] = new_cuboid.tolist()\n points_keypoints[i_objects] = list(map(tuple, points_keypoints[i_objects]))\n \n image_r = cv2.warpAffine(np.array(img), rm, img.size)\n result = cv2.warpAffine(image_r, tm, img.size)\n img = Image.fromarray(result)\n\n # Note: All point coordinates are in the image space, e.g., pixel value.\n # This is used when we do saving --- helpful for debugging\n if self.save or self.test: \n # Use the save to debug the data\n if self.test:\n draw = ImageDraw.Draw(img_original)\n else:\n draw = ImageDraw.Draw(img)\n \n # PIL drawing functions, here for sharing draw\n def DrawKeypoints(points):\n for key in points:\n DrawDot(key,(12, 115, 170),7) \n \n def DrawLine(point1, point2, lineColor, lineWidth):\n if not point1 is None and not point2 is None:\n draw.line([point1,point2],fill=lineColor,width=lineWidth)\n\n def DrawDot(point, pointColor, pointRadius):\n if not point is None:\n xy = [point[0]-pointRadius, point[1]-pointRadius, point[0]+pointRadius, point[1]+pointRadius]\n draw.ellipse(xy, fill=pointColor, outline=pointColor)\n\n def DrawCube(points, which_color = 0, color = None):\n '''Draw cube with a thick solid line across the front top edge.'''\n lineWidthForDrawing = 2\n lineColor1 = (255, 215, 0) # yellow-ish\n lineColor2 = (12, 115, 170) # blue-ish\n lineColor3 = (45, 195, 35) # green-ish\n if which_color == 3:\n lineColor = lineColor3\n else:\n lineColor = lineColor1\n\n if not color is None:\n lineColor = color \n\n # draw front\n DrawLine(points[0], points[1], lineColor, 8) #lineWidthForDrawing)\n DrawLine(points[1], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[0], lineColor, lineWidthForDrawing)\n \n # draw back\n DrawLine(points[4], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[7], lineColor, lineWidthForDrawing)\n DrawLine(points[4], points[7], lineColor, lineWidthForDrawing)\n \n # draw sides\n DrawLine(points[0], points[4], lineColor, lineWidthForDrawing)\n DrawLine(points[7], points[3], lineColor, lineWidthForDrawing)\n DrawLine(points[5], points[1], lineColor, lineWidthForDrawing)\n DrawLine(points[2], points[6], lineColor, lineWidthForDrawing)\n\n # draw dots\n DrawDot(points[0], pointColor=(255,255,255), pointRadius = 3)\n DrawDot(points[1], pointColor=(0,0,0), pointRadius = 3)\n\n # Draw all the found objects. 
\n for points_belief_objects in pointsBelief:\n DrawCube(points_belief_objects)\n for keypoint in points_keypoints:\n DrawKeypoints(keypoint)\n\n img = self.transform(img)\n \n return {\n \"img\":img,\n \"translations\":translations,\n \"rot_quaternions\":rotations,\n 'pointsBelief':np.array(points_all[0]),\n 'matrix_camera':matrix_camera,\n 'img_original': np.array(img_original),\n 'cuboid': cuboid,\n 'file_name':name,\n }\n\n # Create the belief map\n beliefsImg = CreateBeliefMap(\n img, \n pointsBelief=pointsBelief,\n nbpoints = 9,\n sigma = self.sigma)\n\n # Create the image maps for belief\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n for j in range(len(beliefsImg)):\n beliefsImg[j] = self.target_transform(beliefsImg[j])\n # beliefsImg[j].save('{}.png'.format(j))\n beliefsImg[j] = totensor(beliefsImg[j])\n\n beliefs = torch.zeros((len(beliefsImg),beliefsImg[0].size(1),beliefsImg[0].size(2)))\n for j in range(len(beliefsImg)):\n beliefs[j] = beliefsImg[j][0]\n \n\n # Create affinity maps\n scale = 8\n if min (img.size) / 8.0 != min (img_size)/8.0:\n # print (scale)\n scale = min (img.size)/(min (img_size)/8.0)\n\n affinities = GenerateMapAffinity(img,8,pointsBelief,objects_centroid,scale)\n img = self.transform(img)\n\n # Transform the images for training input\n w_crop = np.random.randint(0, img.size[0] - img_size[0]+1)\n h_crop = np.random.randint(0, img.size[1] - img_size[1]+1)\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n if not self.normal is None:\n normalize = transforms.Compose([transforms.Normalize\n ((self.normal[0],self.normal[0],self.normal[0]),\n (self.normal[1],self.normal[1],self.normal[1])),\n AddNoise(self.noise)])\n else:\n normalize = transforms.Compose([AddNoise(0.0001)])\n \n img = crop(img,h_crop,w_crop,img_size[1],img_size[0])\n img = totensor(img)\n\n img = normalize(img)\n\n w_crop = int(w_crop/8)\n h_crop = int(h_crop/8)\n\n affinities = affinities[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n beliefs = beliefs[:,h_crop:h_crop+int(img_size[1]/8),w_crop:w_crop+int(img_size[0]/8)]\n\n if affinities.size()[1] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,1,50)],dim=1)\n\n if affinities.size()[2] == 49 and not self.test:\n affinities = torch.cat([affinities,torch.zeros(16,50,1)],dim=2)\n\n return {\n 'img':img, \n \"affinities\":affinities, \n 'beliefs':beliefs,\n }", "def export3DModel(self, fileName, filePath, fileFormat=\".step\", object_list=[], removed_objects=[]):\n if not object_list:\n allObjects = self.modeler.primitives.object_names\n if removed_objects:\n for rem in removed_objects:\n allObjects.remove(rem)\n else:\n if \"Region\" in allObjects:\n allObjects.remove(\"Region\")\n else:\n allObjects = object_list[:]\n\n self.add_info_message(\"Exporting {} objects\".format(len(allObjects)))\n\n stringa = \",\".join(allObjects)\n arg = [\n \"NAME:ExportParameters\",\n \"AllowRegionDependentPartSelectionForPMLCreation:=\",\n True,\n \"AllowRegionSelectionForPMLCreation:=\",\n True,\n \"Selections:=\",\n stringa,\n \"File Name:=\",\n str(filePath) + \"/\" + str(fileName) + str(fileFormat),\n \"Major Version:=\",\n -1,\n \"Minor Version:=\",\n -1,\n ]\n\n self.modeler.oeditor.Export(arg)\n return True", "def show(data_objects, **options):\n if not is_loaded():\n return data_objects\n\n # (else)\n if not hasattr(data_objects, '__iter__'):\n 
data_objects = [data_objects]\n\n # print(data_objects)\n scene = pygeojs.scene(**options)\n scene.createLayer('osm')\n\n if not data_objects:\n print('No data objects')\n return scene\n\n # feature_layer = scene.createLayer('feature')\n feature_layer = None\n\n combined_bounds = None\n # Reverse order so that first item ends on top\n for data_object in reversed(data_objects):\n if data_object._getdatatype() == gaia.types.VECTOR:\n # print('Adding vector object')\n # Special handling for vector datasets:\n # First, make a copy of the geopandas frame\n df = geopandas.GeoDataFrame.copy(data_object.get_data())\n\n # Convert to lon-lat if needed\n epsg = data_object.get_epsg()\n if epsg and str(epsg) != '4326':\n print('Converting crs')\n df[df.geometry.name] = df.geometry.to_crs(epsg='4326')\n\n # Strip any z coordinates (force to z = 1)\n df.geometry = df.geometry.scale(zfact=0.0).translate(zoff=1.0)\n # df.to_file('/home/john/temp/df.pandas')\n # print(df)\n # print(df.geometry)\n\n # Calculate bounds\n geopandas_bounds = df.geometry.total_bounds\n xmin, ymin, xmax, ymax = geopandas_bounds\n meta_bounds = [\n [xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]\n ]\n\n # Add map feature\n if feature_layer is None:\n feature_layer = scene.createLayer('feature')\n\n # Use __geo_interface__ to get the geojson\n feature_layer.readGeoJSON(df.__geo_interface__)\n # print(df.__geo_interface__)\n else:\n # Get bounds, in order to compute overall bounds\n meta = data_object.get_metadata()\n # print('meta: {}'.format(meta))\n # print(meta)\n raster_bounds = meta.get('bounds').get('coordinates')[0]\n # print(meta_bounds)\n assert raster_bounds, 'data_object missing bounds'\n\n # meta bounds inconsistent between sources, so compute brute force\n xvals, yvals = zip(*raster_bounds)\n xmin, xmax = min(xvals), max(xvals)\n ymin, ymax = min(yvals), max(yvals)\n meta_bounds = [\n [xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]\n ]\n\n # Bounds format is [xmin, ymin, xmax, ymax]\n bounds = [\n meta_bounds[0][0], meta_bounds[0][1],\n meta_bounds[2][0], meta_bounds[2][1]\n ]\n\n # print(bounds)\n if combined_bounds is None:\n combined_bounds = bounds\n else:\n combined_bounds[0] = min(combined_bounds[0], bounds[0])\n combined_bounds[1] = min(combined_bounds[1], bounds[1])\n combined_bounds[2] = max(combined_bounds[2], bounds[2])\n combined_bounds[3] = max(combined_bounds[3], bounds[3])\n\n # print('options:', options)\n rep = options.get('representation')\n if rep == 'outline':\n # Create polygon object\n rect = [\n [bounds[0], bounds[1]],\n [bounds[2], bounds[1]],\n [bounds[2], bounds[3]],\n [bounds[0], bounds[3]],\n [bounds[0], bounds[1]],\n ]\n geojs_polygon = geojson.Polygon([rect])\n properties = {\n 'fillColor': '#fff',\n 'fillOpacity': 0.1,\n 'stroke': True,\n 'strokeColor': '#333',\n 'strokeWidth': 2\n }\n geojson_feature = geojson.Feature(\n geometry=geojs_polygon, properties=properties)\n geojson_collection = geojson.FeatureCollection([geojson_feature])\n # print(geojson_collection)\n\n if feature_layer is None:\n feature_layer = scene.createLayer('feature')\n\n feature_layer.createFeature(\n 'geojson', geojson_collection, **options)\n\n elif data_object.__class__.__name__ == 'GirderDataObject':\n if data_object._getdatatype() == 'raster':\n # Use large-image display\n # Todo - verify that it is installed\n tiles_url = data_object._get_tiles_url()\n # print('tiles_url', tiles_url)\n opacity = 1.0\n if hasattr(data_object, 'opacity'):\n opacity = data_object.opacity\n scene.createLayer(\n 
'osm', url=tiles_url, keepLower=False, opacity=opacity)\n else:\n raise GaiaException(\n 'Cannot display GirderDataObject with data type {}'.format(\n data_object._getdatatype()))\n\n elif data_object._getdatatype() == gaia.types.VECTOR:\n pass # vector objects handled above\n else:\n msg = 'Cannot display dataobject, type {}'.format(\n data_object.__class__.__name__)\n raise GaiaException(msg)\n\n # Send custom message to (javascript) client to set zoom & center\n rpc = {'method': 'set_zoom_and_center', 'params': combined_bounds}\n scene.send(rpc)\n return scene", "def export_world(file, world, scene, global_matrix, tab_write):\n render = scene.pov\n agnosticrender = scene.render\n camera = scene.camera\n # matrix = global_matrix @ camera.matrix_world # view dependant for later use NOT USED\n if not world:\n return\n\n # These lines added to get sky gradient (visible with PNG output)\n\n # For simple flat background:\n if not world.pov.use_sky_blend:\n # No alpha with Sky option:\n if render.alpha_mode == \"SKY\" and not agnosticrender.film_transparent:\n tab_write(\n file, \"background {rgbt<%.3g, %.3g, %.3g, 0>}\\n\" % (world.pov.horizon_color[:])\n )\n\n elif render.alpha_mode == \"STRAIGHT\" or agnosticrender.film_transparent:\n tab_write(\n file, \"background {rgbt<%.3g, %.3g, %.3g, 1>}\\n\" % (world.pov.horizon_color[:])\n )\n else:\n # Non fully transparent background could premultiply alpha and avoid\n # anti-aliasing display issue\n tab_write(\n file,\n \"background {rgbft<%.3g, %.3g, %.3g, %.3g, 0>}\\n\"\n % (\n world.pov.horizon_color[0],\n world.pov.horizon_color[1],\n world.pov.horizon_color[2],\n render.alpha_filter,\n ),\n )\n\n world_tex_count = 0\n # For Background image textures\n for t in world.pov_texture_slots: # risk to write several sky_spheres but maybe ok.\n if t:\n tex = bpy.data.textures[t.texture]\n if tex.type is not None:\n world_tex_count += 1\n # XXX No enable checkbox for world textures yet (report it?)\n # if t and tex.type == 'IMAGE' and t.use:\n if tex.type == \"IMAGE\":\n image_filename = path_image(tex.image)\n if tex.image.filepath != image_filename:\n tex.image.filepath = image_filename\n if image_filename != \"\" and t.use_map_blend:\n textures_blend = image_filename\n # colvalue = t.default_value\n t_blend = t\n\n # Commented below was an idea to make the Background image oriented as camera\n # taken here:\n # http://news.pov.org/pov.newusers/thread/%3Cweb.4a5cddf4e9c9822ba2f93e20@news.pov.org%3E/\n # Replace 4/3 by the ratio of each image found by some custom or existing\n # function\n # mapping_blend = (\" translate <%.4g,%.4g,%.4g> rotate z*degrees\" \\\n # \"(atan((camLocation - camLookAt).x/(camLocation - \" \\\n # \"camLookAt).y)) rotate x*degrees(atan((camLocation - \" \\\n # \"camLookAt).y/(camLocation - camLookAt).z)) rotate y*\" \\\n # \"degrees(atan((camLocation - camLookAt).z/(camLocation - \" \\\n # \"camLookAt).x)) scale <%.4g,%.4g,%.4g>b\" % \\\n # (t_blend.offset.x / 10 , t_blend.offset.y / 10 ,\n # t_blend.offset.z / 10, t_blend.scale.x ,\n # t_blend.scale.y , t_blend.scale.z))\n # using camera rotation valuesdirectly from blender seems much easier\n if t_blend.texture_coords == \"ANGMAP\":\n mapping_blend = \"\"\n else:\n # POV-Ray \"scale\" is not a number of repetitions factor, but its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # UV scale is 0.5,0.5 in blender and 0,0 in POV\n # Further Scale by 2 and translate by -1 are\n # required for the sky_sphere not to 
repeat\n\n mapping_blend = (\n \"scale 2 scale <%.4g,%.4g,%.4g> translate -1 \"\n \"translate <%.4g,%.4g,%.4g> rotate<0,0,0> \"\n % (\n (1.0 / t_blend.scale.x),\n (1.0 / t_blend.scale.y),\n (1.0 / t_blend.scale.z),\n 0.5 - (0.5 / t_blend.scale.x) - t_blend.offset.x,\n 0.5 - (0.5 / t_blend.scale.y) - t_blend.offset.y,\n t_blend.offset.z,\n )\n )\n\n # The initial position and rotation of the pov camera is probably creating\n # the rotation offset should look into it someday but at least background\n # won't rotate with the camera now.\n # Putting the map on a plane would not introduce the skysphere distortion and\n # allow for better image scale matching but also some waay to chose depth and\n # size of the plane relative to camera.\n tab_write(file, \"sky_sphere {\\n\")\n tab_write(file, \"pigment {\\n\")\n tab_write(\n file,\n 'image_map{%s \"%s\" %s}\\n'\n % (image_format(textures_blend), textures_blend, img_map_bg(t_blend)),\n )\n tab_write(file, \"}\\n\")\n tab_write(file, \"%s\\n\" % mapping_blend)\n # The following layered pigment opacifies to black over the texture for\n # transmit below 1 or otherwise adds to itself\n tab_write(file, \"pigment {rgb 0 transmit %s}\\n\" % tex.intensity)\n tab_write(file, \"}\\n\")\n # tab_write(file, \"scale 2\\n\")\n # tab_write(file, \"translate -1\\n\")\n\n # For only Background gradient\n\n if world_tex_count == 0 and world.pov.use_sky_blend:\n tab_write(file, \"sky_sphere {\\n\")\n tab_write(file, \"pigment {\\n\")\n # maybe Should follow the advice of POV doc about replacing gradient\n # for skysphere..5.5\n tab_write(file, \"gradient y\\n\")\n tab_write(file, \"color_map {\\n\")\n\n if render.alpha_mode == \"TRANSPARENT\":\n tab_write(\n file,\n \"[0.0 rgbft<%.3g, %.3g, %.3g, %.3g, 0>]\\n\"\n % (\n world.pov.horizon_color[0],\n world.pov.horizon_color[1],\n world.pov.horizon_color[2],\n render.alpha_filter,\n ),\n )\n tab_write(\n file,\n \"[1.0 rgbft<%.3g, %.3g, %.3g, %.3g, 0>]\\n\"\n % (\n world.pov.zenith_color[0],\n world.pov.zenith_color[1],\n world.pov.zenith_color[2],\n render.alpha_filter,\n ),\n )\n if agnosticrender.film_transparent or render.alpha_mode == \"STRAIGHT\":\n tab_write(file, \"[0.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\\n\" % (world.pov.horizon_color[:]))\n # aa premult not solved with transmit 1\n tab_write(file, \"[1.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\\n\" % (world.pov.zenith_color[:]))\n else:\n tab_write(file, \"[0.0 rgbt<%.3g, %.3g, %.3g, 0>]\\n\" % (world.pov.horizon_color[:]))\n tab_write(file, \"[1.0 rgbt<%.3g, %.3g, %.3g, 0>]\\n\" % (world.pov.zenith_color[:]))\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n # Sky_sphere alpha (transmit) is not translating into image alpha the same\n # way as 'background'\n\n # if world.pov.light_settings.use_indirect_light:\n # scene.pov.radio_enable=1\n\n # Maybe change the above to a function copyInternalRenderer settings when\n # user pushes a button, then:\n # scene.pov.radio_enable = world.pov.light_settings.use_indirect_light\n # and other such translations but maybe this would not be allowed either?\n\n # -----------------------------------------------------------------------------\n\n mist = world.mist_settings\n\n if mist.use_mist:\n tab_write(file, \"fog {\\n\")\n if mist.falloff == \"LINEAR\":\n tab_write(file, \"distance %.6f\\n\" % ((mist.start + mist.depth) * 0.368))\n elif mist.falloff in [\"QUADRATIC\", \"INVERSE_QUADRATIC\"]: # n**2 or squrt(n)?\n tab_write(file, \"distance %.6f\\n\" % ((mist.start + mist.depth) ** 2 * 0.368))\n 
tab_write(\n file,\n \"color rgbt<%.3g, %.3g, %.3g, %.3g>\\n\"\n % (*world.pov.horizon_color, (1.0 - mist.intensity)),\n )\n # tab_write(file, \"fog_offset %.6f\\n\" % mist.start) #create a pov property to prepend\n # tab_write(file, \"fog_alt %.6f\\n\" % mist.height) #XXX right?\n # tab_write(file, \"turbulence 0.2\\n\")\n # tab_write(file, \"turb_depth 0.3\\n\")\n tab_write(file, \"fog_type 1\\n\") # type2 for height\n tab_write(file, \"}\\n\")\n if scene.pov.media_enable:\n tab_write(file, \"media {\\n\")\n tab_write(\n file,\n \"scattering { %d, rgb %.12f*<%.4g, %.4g, %.4g>\\n\"\n % (\n int(scene.pov.media_scattering_type),\n scene.pov.media_diffusion_scale,\n *(scene.pov.media_diffusion_color[:]),\n ),\n )\n if scene.pov.media_scattering_type == \"5\":\n tab_write(file, \"eccentricity %.3g\\n\" % scene.pov.media_eccentricity)\n tab_write(file, \"}\\n\")\n tab_write(\n file,\n \"absorption %.12f*<%.4g, %.4g, %.4g>\\n\"\n % (scene.pov.media_absorption_scale, *(scene.pov.media_absorption_color[:])),\n )\n tab_write(file, \"\\n\")\n tab_write(file, \"samples %.d\\n\" % scene.pov.media_samples)\n tab_write(file, \"}\\n\")", "def get_objects_data(self):\n pass", "def create_scene_obs(name, dimension, is_mesh, mesh_file, orientation, z_offset):\n obs_dict = {}\n obs_dict['name'] = name #string\n obs_dict['dim'] = dimension\n obs_dict['is_mesh'] = is_mesh\n obs_dict['mesh_file'] = mesh_file\n obs_dict['orientation'] = orientation\n obs_dict['z_offset'] = z_offset\n return obs_dict", "def exports():", "def save_and_reload_scene():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.save_and_reload_scene\")\n\n current_file = mc.file(save=True)\n flg.info(\"Current File: {}\".format(current_file))\n mc.file(current_file, ignoreVersion=True, open=True, force=True)", "def load(self):\n\n if self.loaded:\n return\n\n self.region_back = None\n self.objects = []\n self.plants = []\n self.tiles = []\n\n # Some convenience vars\n materials = self.data.materials\n matmods = self.data.matmods\n objects = self.data.objects\n plants = self.data.plants\n world = self.world\n self.loaded = True\n\n # Get tiles\n try:\n data_tiles = world.get_tiles(self.rx, self.ry)\n except KeyError:\n print('WARNING: Region ({}, {}) was not found in world'.format(self.rx, self.ry))\n return\n\n # \"real\" coordinates\n base_x = self.rx*32\n gui_x = base_x*8\n base_y = self.ry*32\n gui_y = (world.height*8)-(base_y*8)\n\n # Background for our drawn area (black)\n self.region_back = self.scene.addRect(gui_x, gui_y-255, 255, 255,\n QtGui.QPen(QtGui.QColor(0, 0, 0)),\n QtGui.QBrush(QtGui.QColor(0, 0, 0)),\n )\n self.region_back.setZValue(Constants.z_black)\n\n # Tiles!\n cur_row = 0\n cur_col = 0\n for data_tile in data_tiles:\n self.tiles.append(GUITile(self.scene, data_tile,\n base_x+cur_col, base_y+cur_row,\n self,\n gui_x+cur_col*8, gui_y-(cur_row+1)*8,\n self.layer_toggles))\n self.scene.addItem(self.tiles[-1])\n cur_col += 1\n if cur_col == 32:\n cur_col = 0\n cur_row += 1\n\n # Entities!\n entities = []\n try:\n entities = world.get_entities(self.rx, self.ry)\n except KeyError:\n pass\n\n for e in entities:\n if e.name == 'ObjectEntity':\n obj_name = e.data['name']\n obj_orientation = e.data['orientationIndex']\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n if obj_name in objects:\n obj = objects[obj_name]\n (image, offset_x, offset_y) = obj.get_image(obj_orientation)\n qpmi = QtWidgets.QGraphicsPixmapItem(image)\n qpmi.setPos(\n (obj_x*8) + offset_x,\n (world.height*8)-(obj_y*8) - offset_y - image.height(),\n )\n 
qpmi.setZValue(Constants.z_objects)\n if not self.layer_toggles.objects_toggle.isChecked():\n qpmi.setVisible(False)\n self.scene.addItem(qpmi)\n self.objects.append(qpmi)\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_object(obj, obj_name, obj_orientation, qpmi, e.data)\n elif e.name == 'PlantEntity':\n desc = e.data['descriptions']['description']\n images = []\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n for piece in e.data['pieces']:\n piece_img = piece['image'].split('?')[0]\n if piece_img in plants:\n img = plants[piece_img].image\n qpmi = QtWidgets.QGraphicsPixmapItem(img)\n qpmi.setPos(\n (obj_x*8) + (piece['offset'][0]*8),\n (world.height*8)-(obj_y*8) - (piece['offset'][1]*8) - img.height(),\n )\n qpmi.setZValue(Constants.z_plants)\n if not self.layer_toggles.plants_toggle.isChecked():\n qpmi.setVisible(False)\n images.append((plants[piece_img], qpmi))\n self.scene.addItem(qpmi)\n self.plants.append(qpmi)\n else:\n print('not found: {}'.format(piece_img))\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_plant(desc, images)\n elif (e.name == 'MonsterEntity'\n or e.name == 'NpcEntity'\n or e.name == 'StagehandEntity'\n or e.name == 'ItemDropEntity'\n or e.name == 'VehicleEntity'\n ):\n # TODO: Ignoring for now\n pass\n else:\n print('Unknown entity type: {}'.format(e.name))", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def __init__(self, meta: SceneDescription):\n super().__init__(meta)\n self.scenes = []\n self.nodes = []\n self.meshes = []\n self.materials = []\n self.images = []\n self.samplers = []\n self.textures = []\n\n self.path = None\n self.scene = None\n self.gltf = None", "def __getitem__(self, index):\n path, name, txt = self.imgs[index]\n img = self.loader(path)\n\n # img_size = (400, 400)\n img_size = (self.img_size, self.img_size)\n \n\n loader = loadjson\n\n data = loader(txt, self.objectsofinterest, img)\n\n pointsBelief = data['pointsBelief'] \n objects_centroid = data['centroids']\n points_all = data['points']\n points_keypoints = data['keypoints_2d']\n translations = torch.from_numpy(np.array(\n data['translations'])).float()\n rotations = torch.from_numpy(np.array(\n data['rotations'])).float()\n\n if len(points_all) == 0:\n # points_all = torch.zeros(1, 10, 2).double()\n points_all = torch.zeros(1)\n\n # self.save == true assumes there is only\n # one object instance in the scene.\n if translations.size()[0] > 1:\n translations = translations[0].unsqueeze(0)\n rotations = rotations[0].unsqueeze(0)\n\n # If there are no objects, still need to return similar shape array\n if len(translations) == 0:\n translations = torch.zeros(1, 3).float()\n rotations = torch.zeros(1, 4).float()\n\n # Camera intrinsics\n path_cam = path.replace(name, '_camera_settings.json')\n with open(path_cam) as data_file:\n data = json.load(data_file)\n # Assumes one camera\n cam = data['camera_settings'][0]['intrinsic_settings']\n\n matrix_camera = np.zeros((3, 3))\n matrix_camera[0, 0] = cam['fx']\n matrix_camera[1, 1] = cam['fy']\n matrix_camera[0, 2] = cam['cx']\n matrix_camera[1, 2] = cam['cy']\n matrix_camera[2, 2] = 1\n\n # Load the cuboid sizes\n path_set = path.replace(name, '_object_settings.json')\n with open(path_set) as data_file:\n data = json.load(data_file)\n\n cuboid = 
torch.zeros(1)\n\n if self.objectsofinterest is None:\n cuboid = np.array(data['exported_objects'][0]['cuboid_dimensions'])\n else:\n for info in data[\"exported_objects\"]:\n if self.objectsofinterest in info['class']:\n cuboid = np.array(info['cuboid_dimensions'])\n\n img_original = img.copy()\n\n def Reproject(points, tm, rm):\n \"\"\"\n Reprojection of points when rotating the image\n \"\"\"\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid\n\n # Random image manipulation, rotation and translation with zero padding\n dx = round(np.random.normal(0, 2) * float(self.random_translation[0]))\n dy = round(np.random.normal(0, 2) * float(self.random_translation[1]))\n angle = round(np.random.normal(0, 1) * float(self.random_rotation))\n\n tm = np.float32([[1, 0, dx], [0, 1, dy]])\n rm = cv2.getRotationMatrix2D(\n (img.size[0] / 2, img.size[1] / 2), angle, 1)\n\n for i_objects in range(len(pointsBelief)):\n points = pointsBelief[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n pointsBelief[i_objects] = new_cuboid.tolist()\n objects_centroid[i_objects] = tuple(new_cuboid.tolist()[-1])\n pointsBelief[i_objects] = list(map(tuple, pointsBelief[i_objects]))\n\n for i_objects in range(len(points_keypoints)):\n points = points_keypoints[i_objects]\n new_cuboid = Reproject(points, tm, rm)\n points_keypoints[i_objects] = new_cuboid.tolist()\n points_keypoints[i_objects] = list(map(tuple, points_keypoints[i_objects]))\n\n image_r = cv2.warpAffine(np.array(img), rm, img.size)\n result = cv2.warpAffine(image_r, tm, img.size)\n img = Image.fromarray(result)\n\n # Note: All point coordinates are in the image space, e.g., pixel value.\n # This is used when we do saving --- helpful for debugging\n if self.save or self.test:\n # Use the save to debug the data\n if self.test:\n draw = ImageDraw.Draw(img_original)\n else:\n draw = ImageDraw.Draw(img)\n\n # PIL drawing functions, here for sharing draw\n def DrawKeypoints(points):\n for key in points:\n DrawDot(key, (12, 115, 170), 7)\n\n def DrawLine(point1, point2, lineColor, lineWidth):\n if not point1 is None and not point2 is None:\n draw.line([point1, point2], fill=lineColor, width=lineWidth)\n\n def DrawDot(point, pointColor, pointRadius):\n if not point is None:\n xy = [point[0] - pointRadius, point[1] - pointRadius, point[0] + pointRadius,\n point[1] + pointRadius]\n draw.ellipse(xy, fill=pointColor, outline=pointColor)\n\n def DrawCube(points, which_color=0, color=None):\n '''Draw cube with a thick solid line across the front top edge.'''\n lineWidthForDrawing = 2\n lineColor1 = (255, 215, 0) # yellow-ish\n lineColor2 = (12, 115, 170) # blue-ish\n lineColor3 = (45, 195, 35) # green-ish\n if which_color == 3:\n lineColor = lineColor3\n else:\n lineColor = lineColor1\n\n if not color is None:\n lineColor = color\n\n # draw front\n DrawLine(points[0], points[1], lineColor, 8) # lineWidthForDrawing)\n DrawLine(points[1], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[2], lineColor, lineWidthForDrawing)\n DrawLine(points[3], points[0], lineColor, lineWidthForDrawing)\n\n # draw back\n DrawLine(points[4], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[5], lineColor, lineWidthForDrawing)\n DrawLine(points[6], points[7], lineColor, lineWidthForDrawing)\n 
DrawLine(points[4], points[7], lineColor, lineWidthForDrawing)\n\n # draw sides\n DrawLine(points[0], points[4], lineColor, lineWidthForDrawing)\n DrawLine(points[7], points[3], lineColor, lineWidthForDrawing)\n DrawLine(points[5], points[1], lineColor, lineWidthForDrawing)\n DrawLine(points[2], points[6], lineColor, lineWidthForDrawing)\n\n # draw dots\n DrawDot(points[0], pointColor=(255, 255, 255), pointRadius=3)\n DrawDot(points[1], pointColor=(0, 0, 0), pointRadius=3)\n\n # Draw all the found objects.\n for points_belief_objects in pointsBelief:\n DrawCube(points_belief_objects)\n for keypoint in points_keypoints:\n DrawKeypoints(keypoint)\n\n img = self.transform(img)\n\n return {\n \"img\": img,\n \"translations\": translations,\n \"rot_quaternions\": rotations,\n 'pointsBelief': np.array(points_all[0]),\n 'matrix_camera': matrix_camera,\n 'img_original': np.array(img_original),\n 'cuboid': cuboid,\n 'file_name': name,\n }\n\n # Create the belief map\n beliefsImg = CreateBeliefMap( #TODO: Investigate generating belief maps\n img,\n pointsBelief=pointsBelief,\n nbpoints=9,\n sigma=self.sigma)\n\n # Create the image maps for belief\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n for j in range(len(beliefsImg)):\n beliefsImg[j] = self.target_transform(beliefsImg[j])\n # beliefsImg[j].save('{}.png'.format(j))\n beliefsImg[j] = totensor(beliefsImg[j])\n\n beliefs = torch.zeros((len(beliefsImg), beliefsImg[0].size(1), beliefsImg[0].size(2)))\n for j in range(len(beliefsImg)):\n beliefs[j] = beliefsImg[j][0]\n\n # Create affinity maps\n scale = 8\n if min(img.size) / 8.0 != min(img_size) / 8.0:\n # print (scale)\n scale = min(img.size) / (min(img_size) / 8.0)\n\n affinities = GenerateMapAffinity(img, 8, pointsBelief, objects_centroid, scale)\n img = self.transform(img)\n\n # Transform the images for training input\n w_crop = np.random.randint(0, img.size[0] - img_size[0] + 1)\n h_crop = np.random.randint(0, img.size[1] - img_size[1] + 1)\n transform = transforms.Compose([transforms.Resize(min(img_size))])\n totensor = transforms.Compose([transforms.ToTensor()])\n\n # if not self.normal is None:\n # normalize = transforms.Compose([transforms.Normalize\n # ((self.normal[0],self.normal[0],self.normal[0]),\n # (self.normal[1],self.normal[1],self.normal[1])),\n # AddNoise(self.noise)])\n\n if not self.normal is None:\n normalize = transforms.Compose([transforms.Normalize\n ((self.normal[0][0], self.normal[0][1], self.normal[0][2]),\n (self.normal[1][0], self.normal[1][1], self.normal[1][2])),\n AddNoise(self.noise)])\n else:\n normalize = transforms.Compose([AddNoise(0.0001)])\n\n img = crop(img, h_crop, w_crop, img_size[1], img_size[0])\n img = totensor(img)\n\n img = normalize(img)\n\n w_crop = int(w_crop / 8)\n h_crop = int(h_crop / 8)\n\n affinities = affinities[:, h_crop:h_crop + int(img_size[1] / 8), w_crop:w_crop + int(img_size[0] / 8)]\n beliefs = beliefs[:, h_crop:h_crop + int(img_size[1] / 8), w_crop:w_crop + int(img_size[0] / 8)]\n\n if affinities.size()[1] == 49 and not self.test:\n affinities = torch.cat([affinities, torch.zeros(16, 1, 50)], dim=1)\n\n if affinities.size()[2] == 49 and not self.test:\n affinities = torch.cat([affinities, torch.zeros(16, 50, 1)], dim=2)\n\n return {\n 'img': img,\n \"affinities\": affinities,\n 'beliefs': beliefs,\n }", "def __init__(self, name, directory, model, anim_data = dict(), descriptor = None, **commands):\n\n self.name = name # this is the name of the file\n 
self.directory = directory # the path that the file is supposed to be located at\n self.Model = model # this is the main model file for the entire scene.\n self.anim_data = anim_data # animation data (defaults to None)\n self.descriptor = descriptor\n\n self.fix_names()\n\n # assign each of the input streams to a variable\n self.index_stream = []\n self.vertex_stream = []\n self.uv_stream = []\n self.n_stream = []\n self.t_stream = []\n self.chvertex_stream = []\n self.materials = set() # this will hopefully mean that there will be at most one copy of each unique TkMaterialData struct in the set\n\n #self.Entities = [] # a list of any extra properties to go in each entity\n\n # extract the streams from the mesh objects.\n index = 0\n for mesh in self.Model.ListOfMeshes:\n self.index_stream.append(mesh.Indexes)\n self.vertex_stream.append(mesh.Vertices)\n self.uv_stream.append(mesh.UVs)\n self.n_stream.append(mesh.Normals)\n self.t_stream.append(mesh.Tangents)\n self.chvertex_stream.append(mesh.CHVerts)\n # also add in the material data to the list\n if mesh.Material is not None:\n self.materials.add(mesh.Material)\n mesh.ID = index # assign the index location of the data to the Object so that it knows where its data is\n index += 1\n #for obj in self.Model.ListOfEntities:\n # self.Entities.append(obj.EntityData)\n\n self.num_mesh_objs = index # this is the total number of objects that have mesh data\n\n self.mesh_data = [dict()]*self.num_mesh_objs # an empty list of dicts that will ber populated then each entry will\n # be given back to the correct Mesh or Collision object\n\n self.preprocess_streams()\n\n # generate some variables relating to the paths\n self.path = os.path.join(BASEPATH, self.directory, self.name) # the path location including the file name.\n self.texture_path = os.path.join(self.path, 'TEXTURES')\n self.anims_path = os.path.join(BASEPATH, self.directory, 'ANIMS')\n self.ent_path = os.path.join(self.path, 'ENTITIES') # path location of the entity folder. 
Calling makedirs of this will ensure all the folders are made in one go\n\n self.create_paths()\n\n # This dictionary contains all the information for the geometry file \n self.GeometryData = dict()\n\n # This will just be some default entity with physics data\n self.TkAttachmentData = TkAttachmentData() # this is created with the Physics Component Data by default\n self.TkAttachmentData.make_elements(main=True)\n\n self.process_data()\n\n self.get_bounds()\n\n self.create_vertex_layouts() # this creates the VertexLayout and SmallVertexLayout properties\n\n # Material defaults\n self.process_materials()\n\n self.process_nodes()\n\n self.mix_streams() # make this last to make sure flattening each stream doesn't affect other data.\n\n # Assign each of the class objects that contain all of the data their data\n self.TkGeometryData = TkGeometryData(**self.GeometryData)\n self.TkGeometryData.make_elements(main=True)\n self.Model.construct_data()\n self.TkSceneNodeData = self.Model.get_data()\n self.TkSceneNodeData.make_elements(main=True) # get the model to create all the required data and this will continue on down the tree\n if len(self.descriptor) != 0:\n self.descriptor = self.descriptor.to_exml()\n self.descriptor.make_elements(main = True)\n else:\n self.descriptor = None\n for material in self.materials:\n if type(material) != str:\n material.make_elements(main=True)\n\n for anim_name in list(self.anim_data.keys()):\n self.anim_data[anim_name].make_elements(main=True)\n\n # write all the files\n self.write()\n\n # convert all the created exml files to mbin files\n if not commands.get('dont_compile', False):\n self.convert_to_mbin()", "def pov_render(self, camera_position = (0,0,-10), camera_target = (0,0,0)):\n\n \"\"\"\n f=pov.File(\"demo.pov\",\"colors.inc\",\"stones.inc\")\n \n cam = pov.Camera(location=camera_position, sky=(1,0,1),look_at=camera_target)\n light = pov.LightSource( camera_position, color=\"White\")\n \n povObjs = [cam, light]\n for obj in self.objects[1:]:\n # test coordinate transfroms\n # print M\n # vectors = np.array([[0,0,0,1], #origin\n # [1,0,0,1], # x\n # [0,1,0,1], # y\n # [0,0,1,1]]).transpose() # z\n # origin,x,y,z = (T*vectors).transpose()\n povObjs.append(povObj(obj))\n \n #print tuple(povObjs)\n f.write(*tuple(povObjs))\n f.close()\n #sphere1 = pov.Sphere( (1,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n #sphere2 = pov.Sphere( (0,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n # composite2 = None#pov.Difference(sphere1, sphere2)\n # \n \n \n \n \n \n # f.write( cam, composite2, light )\n # f.close()\n subprocess.call(\"povray +H2400 +W3200 demo.pov\", shell=True)\n os.system(\"open demo.png\")\n \"\"\"", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def export(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net') #+ name)\n export_path = os.path.join(self.configuration['export_path'], 'exported_net_{}.pth'.format(name))\n batch_fixed = self.input[:,1,:,:,:]\n batch_moving = self.input[:,2,:,:,:]\n traced_script_module = torch.jit.trace(net, (batch_moving, batch_fixed))\n traced_script_module.save(export_path)", "def save(self, _name):\r\n try:\r\n with open(_name, 'w+') as fout:\r\n fout.write(\".cube file generated from prt_esolv.py\\n\")\r\n fout.write(f\"{_name}\\n\")\r\n\r\n fout.write(\r\n f\"{int(self.n_atoms)} {float(self.origin[0])} 
{float(self.origin[1])} {float(self.origin[2])}\\n\")\r\n\r\n fout.write(f\"{int(self.n_x)} {float(self.x[0])} {float(self.x[1])} {float(self.x[2])}\\n\")\r\n fout.write(f\"{int(self.n_y)} {float(self.y[0])} {float(self.y[1])} {float(self.y[2])}\\n\")\r\n fout.write(f\"{int(self.n_z)} {float(self.z[0])} {float(self.z[1])} {float(self.z[2])}\\n\")\r\n\r\n for atom, xyz in zip(self.atoms, self.atoms_xyz):\r\n fout.write(f\"{atom} 0 {xyz[0]} {xyz[1]} {xyz[2]}\\n\")\r\n\r\n for ix in range(self.n_x):\r\n for iy in range(self.n_y):\r\n for iz in range(self.n_z):\r\n fout.write(f\"{self.data[ix][iy][iz]}\")\r\n if iz % 6 == 5:\r\n fout.write('\\n')\r\n fout.write(\"\\n\")\r\n except IOError:\r\n print(f\"Can't create {_name} file!!!\")\r\n raise\r\n\r\n return None", "def import_scene(file_path):\n\n pass", "def execute (self, context):\n scene_path = self.properties.scene_path\n data_path = self.properties.data_path\n scale = self.properties.scale\n base_dir = self.properties.base_dir\n if not scene_path:\n self.report ({'ERROR'}, \"scene path unset\")\n return {'CANCELLED'}\n elif not data_path:\n self.report ({'ERROR'}, \"data path unset\")\n return {'CANCELLED'}\n elif not base_dir:\n self.report ({'ERROR'}, \"base dir unset\")\n return {'CANCELLED'}\n \n objs = get_selected_objects (context)\n exporter = dsf_prop_create.prop_exporter\\\n (scene_path = scene_path, data_path = data_path, scale = scale,\n base_dir = base_dir, scene = context.scene)\n exporter.export_props (objs)\n log.info (\"export: %d objects to %s/%s, scale=%f\",\n len (objs), scene_path, data_path, scale)\n return {'FINISHED'}", "def dump_data(self,filename,dump_id):\n # get pure data copy\n data = [ d.get_pure_data_copy() for d in self.plotter.data ]\n # full file name of the file with manipulator dump\n filename=tdc_Filenames.get_full_vis_filename(dump_id, filename+'.pickle')\n pickle.dump( data, open(filename,'w') )\n print '\\nContent dumped in \"%s\" \\n' % filename", "def read(scene_name):\n routes, fixtures = read_fixtures(scene_name)\n scene = build_scene_from_fixtures(fixtures, scene_name)\n write_to_json(scene, scene_name)\n if routes:\n write_to_json(build_routes_file(routes, scene_name), scene_name + \"-routes\")", "def dumpData(self,out):\n out.packSub0('NAME',self.id)\n if getattr(self,'isDeleted',False):\n out.packSub('DELE','i',0)\n return\n out.packSub0('MODL',self.model)\n if self.title: out.packSub0('FNAM',self.title)\n out.packSub('BKDT','f4i',\n self.weight, self.value, self.isScroll, self.teaches, self.enchantPoints)\n if self.script: out.packSub0('SCRI',self.script)\n if self.icon: out.packSub0('ITEX',self.icon)\n if self.text: out.packSub0('TEXT',self.text)\n if self.enchant: out.packSub0('TEXT',self.enchant)", "def __init__(self, *args, **kwargs):\n super(MayaScene, self).__init__(*args, **kwargs)", "def object_export(request, simulation, object_name):\n query = get_query(object_name, simulation)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n seed = np.random.randint(10000)\n filename = '{0}/website_files/exports/{1}.tsv'.format(settings.BASE_DIR,\n seed)\n with codecs.open(filename, 'w', encoding='utf8') as f:\n if object_name == 'centroid':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'crossing':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'link':\n fields = ['id', 'name', 'origin', 'destination', 'lanes', 'length',\n 'speed', 'capacity', 'vdf']\n elif object_name == 'function':\n 
fields = ['id', 'expression']\n writer = csv.writer(f, delimiter='\\t')\n if object_name in ('centroid', 'crossing'):\n writer.writerow(['id', 'name', 'x', 'y', 'db_id'])\n values = query.values_list('user_id', 'name', 'x', 'y', 'id')\n elif object_name == 'function':\n writer.writerow(['id', 'name', 'expression'])\n values = query.values_list('user_id', 'name', 'expression')\n elif object_name == 'link':\n writer.writerow(['id', 'name', 'lanes', 'length', 'speed',\n 'capacity', 'function', 'origin', 'destination'])\n values = query.values_list('user_id', 'name', 'lanes', 'length',\n 'speed', 'capacity', 'vdf__user_id')\n # Origin and destination id must be converted to user_id.\n centroids = get_query('centroid', simulation)\n crossings = get_query('crossing', simulation)\n ids = list(centroids.values_list('id', 'user_id'))\n ids += list(crossings.values_list('id', 'user_id'))\n # Map id of nodes to their user_id.\n id_mapping = dict(ids)\n origins = query.values_list('origin', flat=True)\n origins = np.array([id_mapping[n] for n in origins])\n destinations = query.values_list('destination', flat=True)\n destinations = np.array([id_mapping[n] for n in destinations])\n # Add origin and destination user ids to the values array.\n origins = np.transpose([origins])\n destinations = np.transpose([destinations])\n values = np.hstack([values, origins, destinations])\n writer.writerows(values)\n with codecs.open(filename, 'r', encoding='utf8') as f:\n # Build a response to send a file.\n response = HttpResponse(f.read())\n response['content_type'] = 'text/tab-separated-values'\n response['Content-Disposition'] = \\\n 'attachement; filename={}.tsv'.format(metro_to_user(object_name))\n # We delete the export file to save disk space.\n os.remove(filename)\n return response", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all 
collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def poseReaderRig(objs,space=1,name=None, nameOverride=0):\n if not pm.pluginInfo(\"poseReader\",q=1,loaded=1):\n pm.loadPlugin(\"poseReader.so\")\n if len(objs)<=0:\n pm.error((\"poseReaderUI: You must select one or more objects to create a poseReader node for!\"),sl=0)\n poses=[]\n # Store created nodes for sel at end\n obj=''\n for obj in objs:\n Obj=pm.util.capitalize(obj)\n # new to maya 6, tho it is a script....\n if name == None:\n pose=pm.createNode(\"poseReader\",n=(\"poseReader_\" + Obj + \"Shape#\"))\n else:\n if nameOverride==0:\n pose=pm.createNode(\"poseReader\",n=(\"poseReader_\" + Obj+name+'Shape'))\n elif nameOverride==1:\n if name[-5:]=='Shape':\n pose=pm.createNode(\"poseReader\",n=name)\n else:\n pose=pm.createNode(\"poseReader\",n=name+'Shape')\n attr=\"worldMatrix\"\n if space == 2:\n attr=\"matrix\"\n\n pm.connectAttr((obj + \".\" + attr),(pose + \".worldMatrixLiveIn\"),f=1)\n xform=pm.listRelatives(pose,p=1)[0]\n pm.connectAttr((xform + \".\" + attr),(pose + \".worldMatrixPoseIn\"),f=1)\n poses.append(xform)\n # Actually store xform for sel.\n # Make a keyable attr people can actually see and use.\n pm.addAttr(pose,ln=\"weight\",k=1)\n pm.connectAttr((pose + \".outWeight\"),(pose + \".weight\"),f=1)\n # Parent to same parent that object has.\n # Very important if using local space.\n parent=pm.listRelatives(obj,p=1)[0]\n if parent != \"\":\n pm.parent(xform,parent)\n # match rotate order of obj\n rotOrder = pm.getAttr(obj+'.rotateOrder')\n xform.attr('rotateOrder').set(rotOrder)\n # Snap xform to same as obj\n pCons=pm.pointConstraint(obj,xform,w=1)\n oCons=pm.orientConstraint(obj,xform,w=1)\n pm.delete(pCons,oCons)\n # Also make up animCurve for animCurve mode\n animCurve=pm.createNode('animCurveUU')\n pm.setKeyframe(animCurve,itt=\"flat\",v=1.0,ott=\"flat\",f=0.0)\n pm.setKeyframe(animCurve,itt=\"spline\",v=0.85,ott=\"spline\",f=0.25)\n pm.setKeyframe(animCurve,itt=\"spline\",v=0.15,ott=\"spline\",f=0.75)\n pm.setKeyframe(animCurve,itt=\"flat\",v=0.0,ott=\"flat\",f=1.0)\n pm.connectAttr((animCurve + \".message\"),(pose + \".msgAnimCurve\"),f=1)\n pm.connectAttr((animCurve + \".output\"),(pose + \".animCurveOutput\"),f=1)\n\n pm.select(poses,r=1)\n # Now if we have more than one pose...connect them up to a multiTrigger node\n nPoses=len(poses)\n if nPoses>1:\n trig=pm.createNode(\"multiTrigger\")\n # Make a keyable attr people can actually see and use.\n pm.addAttr(trig,ln=\"weight\",k=1)\n pm.connectAttr((trig + \".outWeight\"),(trig + \".weight\"),f=1)\n i=0\n for i in range(0,nPoses):\n pm.connectAttr((poses[i] + \".weight\"),(trig + \".inputValues[\" + str(i) + \"]\"),f=1)\n pm.select(poses,trig,r=1)\n return pose", "def create_scene(self):\n\n c = config.Colors.background\n gr3.setbackgroundcolor(c[0], c[1], c[2], 1.0)\n gr3.clear()\n\n if self.results is None:\n return\n\n show_domains = self.settings.show_domains\n show_surface_cavities = self.settings.show_surface_cavities\n show_center_cavities = self.settings.show_center_cavities\n if show_center_cavities and self.results.center_cavities is not None:\n 
show_surface_cavities = False\n elif show_surface_cavities and self.results.surface_cavities is not None:\n show_domains = False\n\n self.objectids = [None]\n edges = self.results.atoms.volume.edges\n num_edges = len(edges)\n edge_positions = [edge[0] for edge in edges]\n edge_directions = [[edge[1][i]-edge[0][i] for i in range(3)] for edge in edges]\n edge_lengths = [sum([c*c for c in edge])**0.5 for edge in edge_directions]\n edge_radius = min(edge_lengths)/200\n if self.settings.show_bounding_box:\n gr3.drawcylindermesh(num_edges, edge_positions, edge_directions,\n [config.Colors.bounding_box]*num_edges,\n [edge_radius]*num_edges, edge_lengths)\n corners = list(set([tuple(edge[0]) for edge in edges] + [tuple(edge[1]) for edge in edges]))\n num_corners = len(corners)\n gr3.drawspheremesh(num_corners, corners,\n [config.Colors.bounding_box]*num_corners,\n [edge_radius]*num_corners)\n\n if self.settings.show_atoms and self.results.atoms is not None:\n visible_atom_indices = self.settings.visible_atom_indices\n if visible_atom_indices is not None:\n visible_atom_indices = [comp for comp in visible_atom_indices if 0 <= comp < self.results.atoms.number]\n else:\n visible_atom_indices = range(self.results.atoms.number)\n if len(visible_atom_indices) == 0:\n visible_atom_indices = None\n if visible_atom_indices is not None:\n visible_atom_indices = np.array(visible_atom_indices)\n gr3.drawspheremesh(len(visible_atom_indices),\n self.results.atoms.positions[visible_atom_indices],\n self.results.atoms.colors[visible_atom_indices],\n np.ones(len(visible_atom_indices))*config.OpenGL.atom_radius)\n if self.settings.show_bonds:\n bonds = self.results.atoms.bonds\n for start_index, target_indices in enumerate(bonds):\n if start_index not in visible_atom_indices:\n continue\n target_indices = np.array([i for i in target_indices if i in visible_atom_indices])\n if len(target_indices) == 0:\n continue\n start_position = self.results.atoms.positions[start_index]\n target_positions = self.results.atoms.positions[target_indices]\n directions = target_positions - start_position\n bond_lengths = la.norm(directions, axis=1)\n directions /= bond_lengths.reshape(len(directions), 1)\n gr3.drawcylindermesh(len(target_indices),\n target_positions,\n -directions,\n [config.Colors.bonds] * self.results.atoms.number,\n np.ones(bond_lengths.shape)*config.OpenGL.bond_radius,\n bond_lengths)\n\n if self.results is None:\n return\n if show_domains and self.results.domains is not None:\n self.draw_cavities(self.results.domains,\n config.Colors.domain, 'domain',\n self.settings.visible_domain_indices)\n if show_surface_cavities and self.results.surface_cavities is not None:\n self.draw_cavities(self.results.surface_cavities,\n config.Colors.surface_cavity, 'surface cavity',\n self.settings.visible_surface_cavity_indices)\n if show_center_cavities and self.results.center_cavities is not None:\n self.draw_cavities(self.results.center_cavities,\n config.Colors.center_cavity, 'center cavity',\n self.settings.visible_center_cavity_indices)", "def writeGroundPlane(self,obj,renderer):\n\n result = \"\"\n bbox = FreeCAD.BoundBox()\n for view in obj.Group:\n if view.Source and hasattr(view.Source,\"Shape\") and hasattr(view.Source.Shape,\"BoundBox\"):\n bbox.add(view.Source.Shape.BoundBox)\n if bbox.isValid():\n import Part\n margin = bbox.DiagonalLength/2\n p1 = FreeCAD.Vector(bbox.XMin-margin,bbox.YMin-margin,0)\n p2 = FreeCAD.Vector(bbox.XMax+margin,bbox.YMin-margin,0)\n p3 = FreeCAD.Vector(bbox.XMax+margin,bbox.YMax+margin,0)\n 
p4 = FreeCAD.Vector(bbox.XMin-margin,bbox.YMax+margin,0)\n\n # create temporary object. We do this to keep the renderers code as simple as possible:\n # they only need to deal with one type of object: RenderView objects\n dummy1 = FreeCAD.ActiveDocument.addObject(\"Part::Feature\",\"renderdummy1\")\n dummy1.Shape = Part.Face(Part.makePolygon([p1,p2,p3,p4,p1]))\n dummy2 = FreeCAD.ActiveDocument.addObject(\"App::FeaturePython\",\"renderdummy2\")\n View(dummy2)\n dummy2.Source = dummy1\n ViewProviderView(dummy2.ViewObject)\n FreeCAD.ActiveDocument.recompute()\n\n result = self.writeObject(dummy2,renderer)\n\n # remove temp objects\n FreeCAD.ActiveDocument.removeObject(dummy2.Name)\n FreeCAD.ActiveDocument.removeObject(dummy1.Name)\n FreeCAD.ActiveDocument.recompute()\n\n return result", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? 
back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def fill_export_section():\n section = _SectionData(\"Export\")\n section.props.append((\"ExportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_scale)))\n section.props.append((\"ApplyModifiers\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_apply_modifiers))))\n section.props.append((\"ExcludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_exclude_edgesplit))))\n section.props.append((\"IncludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_include_edgesplit))))\n section.props.append((\"ActiveUVOnly\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_active_uv_only))))\n section.props.append((\"ExportVertexGroups\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_groups))))\n section.props.append((\"ExportVertexColor\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color))))\n section.props.append((\"ExportVertexColorType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type)))\n section.props.append((\"ExportVertexColorType7\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type_7)))\n # section.props.append((\"ExportAnimFile\", info.get_default_prop_value(bpy.types.GlobalSCSProps.export_anim_file)))\n section.props.append((\"ExportPimFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pim_file))))\n section.props.append((\"OutputType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_output_type)))\n section.props.append((\"ExportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pit_file))))\n section.props.append((\"ExportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pic_file))))\n section.props.append((\"ExportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pip_file))))\n section.props.append((\"SignExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_write_signature))))\n return section", "def __init__(self, parent):\n super(P5, self).__init__(parent)\n self.shapes = []\n self.scenes = []\n self.current_scene = 0\n self.objects = []\n self.lighting = True\n self.draw_axes = True", "def load_obj_render_BSR(objfile):\n #load a 3d model and render it in 2D\n obj = object3d() \n obj.load(objfile)\n \n #obj.scale_pts( (.1,.2,.1) )\n #obj.rotate_pts( (.1,.1,.1) )\n\n #obj2 = object3d() \n #obj2.load('objects/monkey.obj')\n\n bloody_simple_2drender('2d_render.png', obj=[obj], gridsize=100)", "def __init__(self, objects=()):\n\n vtk.vtkPropAssembly.__init__(self)\n\n self.name = \"\"\n self.created = \"\"\n self.trail = None\n self.trail_points = []\n self.trail_segment_size = 0\n self.trail_offset = None\n self.shadows = []\n self.info = {}\n self.rendered_at = set()\n self.transform = None\n self.scalarbar = None\n\n for a in vedo.utils.flatten(objects):\n if a:\n self.AddPart(a)\n\n self.PickableOff()", "def dump_model(self):", "def exportMasterLayerSettings(self):\n\t\tmaster = rlayer.RenderLayer( 'defaultRenderLayer' )\n\t\tmaster.makeCurrent()\n\t\tmasterData = {}\n\t\tnodes = 
['defaultArnoldRenderOptions','defaultResolution','defaultRenderGlobals']\n\t\tmnNodes =[ mn.Node( n ) for n in nodes ]\n\t\tfor n in mnNodes:\n\t\t\tfor a in n.listAttr( se = True, v = True, w = True ):\n\t\t\t\ttry:\n\t\t\t\t\tmasterData[a] = a.v\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\tpickle.dump( masterData, open( self.masterPath.path, \"wb\" ) )", "def create_main_saver_node(self, version):\n fps = 25\n if version:\n project = version.task.project\n fps = project.fps\n\n random_ref_id = uuid.uuid4().hex\n\n output_format_data = [\n {\n 'name': 'jpg',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('jpg'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'jpg'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'JPEGFormat',\n 'JpegFormat.Quality': 85,\n },\n 'connected_to': {\n # 'ref_id': random_ref_id\n 'Input': {\n 'type': 'ColorCurves',\n 'ref_id': random_ref_id,\n 'input_list': {\n 'EditAlpha': 0.0,\n },\n 'connected_to': {\n 'Input': {\n 'type': 'CineonLog',\n 'input_list': {\n 'Mode': 1,\n # 'RedBlackLevel': 0.0,\n # 'RedWhiteLevel': 1023.0,\n 'RedFilmStockGamma': 1.0\n },\n }\n }\n }\n }\n },\n },\n {\n 'name': 'tga',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('tga'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'tga'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'TGAFormat',\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n },\n },\n {\n 'name': 'exr',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('exr'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'exr'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 1,\n 'OutputFormat': 'OpenEXRFormat',\n 'OpenEXRFormat.Depth': 1, # 16-bit float\n 'OpenEXRFormat.RedEnable': 1,\n 'OpenEXRFormat.GreenEnable': 1,\n 'OpenEXRFormat.BlueEnable': 1,\n 'OpenEXRFormat.AlphaEnable': 1,\n 'OpenEXRFormat.ZEnable': 0,\n 'OpenEXRFormat.CovEnable': 0,\n 'OpenEXRFormat.ObjIDEnable': 0,\n 'OpenEXRFormat.MatIDEnable': 0,\n 'OpenEXRFormat.UEnable': 0,\n 'OpenEXRFormat.VEnable': 0,\n 'OpenEXRFormat.XNormEnable': 0,\n 'OpenEXRFormat.YNormEnable': 0,\n 'OpenEXRFormat.ZNormEnable': 0,\n 'OpenEXRFormat.XVelEnable': 0,\n 'OpenEXRFormat.YVelEnable': 0,\n 'OpenEXRFormat.XRevVelEnable': 0,\n 'OpenEXRFormat.YRevVelEnable': 0,\n 'OpenEXRFormat.XPosEnable': 0,\n 'OpenEXRFormat.YPosEnable': 0,\n 'OpenEXRFormat.ZPosEnable': 0,\n 'OpenEXRFormat.XDispEnable': 0,\n 'OpenEXRFormat.YDispEnable': 0,\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n {\n 'name': 'mp4',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mp4'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mp4'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'H.264_avc1',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 
'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n {\n 'name': 'mov',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mov'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mov'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'Apple ProRes 422 HQ_apch',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n\n 'QuickTimeMovies.LimitDataRate': 0.0,\n 'QuickTimeMovies.DataRateK': 1000.0,\n 'QuickTimeMovies.Advanced': 1.0,\n 'QuickTimeMovies.Primaries': 0.0,\n 'QuickTimeMovies.Transfer': 0.0,\n 'QuickTimeMovies.Matrix': 0.0,\n 'QuickTimeMovies.PixelAspectRatio': 0.0,\n 'QuickTimeMovies.ErrorDiffusion': 1.0,\n 'QuickTimeMovies.SaveAlphaChannel': 1.0,\n\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n\n\n\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n ]\n\n if version.task.type and version.task.type.name == 'Plate':\n # create a different type of outputs\n output_format_data = [\n {\n 'name': 'jpg',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('jpg'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'jpg'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'JPEGFormat',\n 'JpegFormat.Quality': 85,\n },\n },\n },\n {\n 'name': 'exr',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('exr'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'exr'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'OpenEXRFormat',\n 'OpenEXRFormat.Depth': 1, # 16-bit float\n 'OpenEXRFormat.RedEnable': 1,\n 'OpenEXRFormat.GreenEnable': 1,\n 'OpenEXRFormat.BlueEnable': 1,\n 'OpenEXRFormat.AlphaEnable': 0,\n 'OpenEXRFormat.ZEnable': 0,\n 'OpenEXRFormat.CovEnable': 0,\n 'OpenEXRFormat.ObjIDEnable': 0,\n 'OpenEXRFormat.MatIDEnable': 0,\n 'OpenEXRFormat.UEnable': 0,\n 'OpenEXRFormat.VEnable': 0,\n 'OpenEXRFormat.XNormEnable': 0,\n 'OpenEXRFormat.YNormEnable': 0,\n 'OpenEXRFormat.ZNormEnable': 0,\n 'OpenEXRFormat.XVelEnable': 0,\n 'OpenEXRFormat.YVelEnable': 0,\n 'OpenEXRFormat.XRevVelEnable': 0,\n 'OpenEXRFormat.YRevVelEnable': 0,\n 'OpenEXRFormat.XPosEnable': 0,\n 'OpenEXRFormat.YPosEnable': 0,\n 'OpenEXRFormat.ZPosEnable': 0,\n 'OpenEXRFormat.XDispEnable': 0,\n 'OpenEXRFormat.YDispEnable': 0,\n },\n },\n },\n {\n 'name': 'mp4',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mp4'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mp4'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'H.264_avc1',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n 'StartRenderScript': 'frames_at_once = 
comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n },\n },\n },\n ]\n\n # selectively generate output format\n saver_nodes = self.get_main_saver_node()\n\n for data in output_format_data:\n format_name = data['name']\n node_tree = data['node_tree']\n\n # now check if a node with the same name exists\n format_node = None\n format_node_name = self.output_node_name_generator(format_name)\n for node in saver_nodes:\n node_name = node.GetAttrs('TOOLS_Name')\n if node_name.startswith(format_node_name):\n format_node = node\n break\n\n # create the saver node for this format if missing\n if not format_node:\n self.create_node_tree(node_tree)\n else:\n # just update the input_lists\n if 'input_list' in node_tree:\n input_list = node_tree['input_list']\n for key in input_list:\n node_input_list = format_node.GetInputList()\n for input_entry_key in node_input_list.keys():\n input_entry = node_input_list[input_entry_key]\n input_id = input_entry.GetAttrs()['INPS_ID']\n if input_id == key:\n value = input_list[key]\n input_entry[0] = value\n break\n\n try:\n os.makedirs(\n os.path.dirname(\n self.output_path_generator(version, format_name)\n )\n )\n except OSError:\n # path already exists\n pass", "def export(self, savepath):\n logger.debug(f\"Exporting scene to {savepath}\")\n _backend = self.backend\n\n if not self.is_rendered:\n self.render(interactive=False)\n\n path = Path(savepath)\n if path.suffix != \".html\":\n raise ValueError(\"Savepath should point to a .html file\")\n\n # prepare settings\n vsettings.notebookBackend = \"k3d\"\n\n # Create new plotter and save to file\n plt = Plotter()\n plt.add(self.clean_renderables, render=False)\n plt = plt.show(interactive=False)\n plt.camera[-2] = -1\n\n with open(path, \"w\") as fp:\n fp.write(plt.get_snapshot())\n\n print(\n f\"The brainrender scene has been exported for web. 
The results are saved at {path}\"\n )\n\n # Reset settings\n vsettings.notebookBackend = None\n self.backend = _backend\n\n return str(path)", "def __init__(self, to_save):\r\n self.mesh = to_save.mesh\r\n self.max_T = to_save.max_T\r\n self.d_T = to_save.d_T\r\n # Data retention\r\n self.saved_data = to_save.saved_data\r\n self.node_map = to_save.node_map", "def saveCallback(self):\n\n ## TODO // TEST IT\n self._pathsDict[\"sceneFile\"] = self.getSceneFile()\n try:\n openSceneInfo = self.getOpenSceneInfo()\n if not openSceneInfo:\n return\n except TypeError:\n return\n if openSceneInfo[\"jsonFile\"]:\n jsonInfo = self._loadJson(openSceneInfo[\"jsonFile\"])\n if jsonInfo[\"ReferenceFile\"]:\n absRefFile = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"ReferenceFile\"])\n # TODO : ref => Dict\n absBaseSceneVersion = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"Versions\"][int(jsonInfo[\"ReferencedVersion\"]) - 1][\"RelativePath\"])\n # if the refererenced scene file is the saved file (saved or saved as)\n if self._pathsDict[\"sceneFile\"] == absBaseSceneVersion:\n # copy over the forReference file\n try:\n shutil.copyfile(self._pathsDict[\"sceneFile\"], absRefFile)\n print \"Scene Manager Update:\\nReference File Updated\"\n except:\n pass", "def dumpData(self,out):\n #--Get sizes and dump into dataIO\n self.hedr.getSize()\n self.hedr.dump(out)\n for (name,size) in self.masters:\n out.packSub0('MAST',name)\n out.packSub('DATA','Q',size)\n if self.gmdt: \n self.gmdt.getSize()\n self.gmdt.dump(out)\n for other in self.others:\n other.getSize()\n other.dump(out)", "def export(self, buffer: IO[str], ind: str = '') -> None:\n buffer.write(ind + 'camera\\n')\n buffer.write(ind + '{\\n')\n buffer.write(f'{ind}\\t\"position\" \"[{self.pos}]\"\\n')\n buffer.write(f'{ind}\\t\"look\" \"[{self.target}]\"\\n')\n buffer.write(ind + '}\\n')", "def __init__(self, scene: Scene):\n super(SceneGUI, self).__init__()\n\n self.scene = scene # save instance of Scene class to this object\n if scene.photons.size == 0:\n raise(Exception, \"no data stored in scene\")\n\n # QImage require data to be 32 bit aligned. 
Thus, we need to make sure out_size is even\n out_size = (round(scene.n_rows * 150/scene.n_cols)*2, 300)\n self.image = imresize(scene.srgb, out_size, interp='nearest')\n\n # set status bar\n self.statusBar().showMessage(\"Ready\")\n\n # set menu bar\n menu_bar = self.menuBar()\n menu_file = menu_bar.addMenu(\"&File\")\n menu_plot = menu_bar.addMenu(\"&Plot\")\n\n # add load scene to file menu\n load_scene = QtGui.QAction(\"Load Scene\", self)\n load_scene.setStatusTip(\"Load scene from file\")\n load_scene.triggered.connect(self.menu_load_scene)\n menu_file.addAction(load_scene)\n\n # add save scene to file menu\n save_scene = QtGui.QAction(\"Save Scene\", self)\n save_scene.setStatusTip(\"Save scene to file\")\n save_scene.setShortcut(\"Ctrl+S\")\n save_scene.triggered.connect(self.menu_save_scene)\n menu_file.addAction(save_scene)\n\n # add illuminant energy to plot menu\n plot_il_energy = QtGui.QAction(\"Illuminant (Energy)\", self)\n plot_il_energy.setStatusTip(\"Plot spectra power distribution of scene illuminant\")\n plot_il_energy.triggered.connect(lambda: self.scene.plot(\"illuminant energy\"))\n menu_plot.addAction(plot_il_energy)\n\n # add illuminant photons to plot menu\n plot_il_quanta = QtGui.QAction(\"Illuminant (Photons)\", self)\n plot_il_quanta.setStatusTip(\"Plot spectra power distribution of scene illuminant\")\n plot_il_quanta.triggered.connect(lambda: self.scene.plot(\"illuminant photons\"))\n menu_plot.addAction(plot_il_quanta)\n\n # set up left panel\n left_panel = self.init_image_panel()\n\n # set up right panel\n right_panel = self.init_control_panel()\n\n splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)\n splitter.addWidget(left_panel)\n splitter.addWidget(right_panel)\n\n QtGui.QApplication.setStyle(QtGui.QStyleFactory().create('Cleanlooks'))\n\n widget = QtGui.QWidget()\n hbox = QtGui.QHBoxLayout(widget)\n hbox.addWidget(splitter)\n\n self.setCentralWidget(widget)\n\n # set size and put window to center of the screen\n self.resize(600, 400)\n qr = self.frameGeometry()\n qr.moveCenter(QtGui.QDesktopWidget().availableGeometry().center())\n self.move(qr.topLeft())\n\n # set title and show\n self.setWindowTitle(\"Scene GUI: \" + scene.name)\n self.show()", "def parse( self, data, baseURL, *args, **named ):\n sg = basenodes.sceneGraph(\n )\n \n # these three are shared among all shapes\n hash = md5( baseURL ).hexdigest()\n coord = basenodes.Coordinate( DEF='Coord-%s'%(hash,) )\n normal = basenodes.Normal(DEF='Norm-%s'%(hash,))\n texCoord = basenodes.TextureCoordinate(DEF='TexCoord-%s'%(hash,))\n\n mesh = None # transforms\n group = None # shape\n material = None # appearance, material, texture\n \n materials = {}\n \n # indices are 1-based, the first values are never used...\n vertices = [[0., 0., 0.]] \n normals = [[0., 0., 0.]]\n tex_coords = [[0., 0.]]\n \n current_vertex_indices = []\n current_normal_indices = []\n current_texcoord_indices = []\n\n for line in data.splitlines():\n if line.startswith('#'): \n continue\n values = line.split()\n if not values: \n continue\n\n if values[0] == 'v':\n vertices.append(map(float, values[1:4]))\n elif values[0] == 'vn':\n normals.append(map(float, values[1:4]))\n elif values[0] == 'vt':\n tex_coords.append(map(float, values[1:3]))\n elif values[0] == 'mtllib':\n self.load_material_library(values[1], materials, baseURL)\n elif values[0] in ('usemtl', 'usemat'):\n material = materials.get(values[1], None)\n if material is None:\n log.warn('Unknown material: %s', values[1])\n material = self.defaultMaterial()\n if mesh is 
not None:\n if group and current_vertex_indices:\n group.geometry.coordIndex = current_vertex_indices\n group.geometry.texCoordIndex = current_texcoord_indices\n group.geometry.normalIndex = current_normal_indices\n current_vertex_indices = []\n current_texcoord_indices = []\n current_normal_indices = []\n group = basenodes.Shape(\n geometry = basenodes.IndexedFaceSet(\n coord = coord,\n normal = normal,\n texCoord = texCoord,\n solid=False,\n ),\n appearance = material,\n )\n mesh.children.append(group)\n elif values[0] == 'o':\n mesh = basenodes.Transform( DEF = values[1] )\n sg.children.append( mesh )\n sg.regDefName( values[1], mesh )\n # previous shape is no longer current...\n group = None\n elif values[0] == 's':\n # a smoothing-group definition...\n # not currently supported...\n pass\n elif values[0] == 'f':\n # adds a single face\n if mesh is None:\n # anonymous transform\n mesh = basenodes.Transform()\n sg.children.append(mesh)\n if material is None:\n material = self.defaultMaterial()\n if group is None:\n group = basenodes.Shape( \n geometry = basenodes.IndexedFaceSet(\n coord = coord,\n normal = normal,\n texCoord = texCoord,\n solid=False,\n ),\n appearance = material,\n )\n mesh.children.append(group)\n\n for i, v in enumerate(values[1:]):\n v_index, t_index, n_index = self._cleanIndex( v )\n current_vertex_indices.append( v_index )\n current_texcoord_indices.append( t_index )\n current_normal_indices.append( n_index )\n current_vertex_indices.append( -1 )\n current_texcoord_indices.append( -1 )\n current_normal_indices.append( -1 )\n else:\n log.warn( \"\"\"Unrecognized operation: %r\"\"\", values )\n if group and current_vertex_indices:\n group.geometry.coordIndex = current_vertex_indices\n group.geometry.texCoordIndex = current_texcoord_indices\n group.geometry.normalIndex = current_normal_indices\n coord.point = vertices\n normal.normal = normals\n texCoord.texCoord = tex_coords\n return True,sg\n \n \n # this creates a pointset-only version of the geometry...", "def simulate(scene_name):\n fixtures = simulate_fixtures()\n scene = build_scene_from_fixtures(fixtures, scene_name)\n write_to_json(scene, scene_name)", "def importData( self, asset = '', searchAndReplace = ['',''] ):\n\t\tpickleData = pickle.load( open( self.dataPath.path, \"rb\" ) )\n\t\tlayers = [RenderLayerData(l,d) for l,d in pickleData.items() if not ':' in l]\n\t\tfor l in layers:\n\t\t\tif not searchAndReplace [0]== '' or not searchAndReplace[1] == '':\n\t\t\t\tl.filterMe( asset, searchAndReplace )\n\t\t\tl.create()\n\t\t\tl.addObjects()\n\t\t\tl.makeOverrides()\n\t\t\tl.makeOverrideConnections()\n\t\t\tl.makeShaderOverride()", "def saveObjectMaps(self):\n if self.objectMaps == None: return\n path = os.path.join(self.dir,settings['mosh.modInfos.objectMaps'])\n outDir = os.path.split(path)[0]\n if not os.path.exists(outDir): os.makedirs(outDir)\n cPickle.dump(self.objectMaps,open(path,'wb'),2)", "def render_save(scene, cam, globalIdx, trajDir, camDir, NI=1280, NJ=720):\n #render image/convert to bimg\n expimg = scene.render(cam, NI, NJ);\n bimg = convert_image(expimg); \n exp_fname = trajDir + \"/exp_%(#)06d.png\"%{\"#\":globalIdx};\n save_image(bimg, exp_fname); \n\n #save cam\n cam_name = camDir + \"/cam_%(#)06d.txt\"%{\"#\":globalIdx}\n save_perspective_camera(cam, cam_name)\n remove_from_db([cam, expimg, bimg])", "def dump_data(self,filename,dump_id):\n import pickle\n from Auxiliary import tdc_Filenames\n data = [ d.get_pure_data_copy() for d in self.plotter.data ]\n dump_dict={}\n 
dump_dict['fft_data'] = data\n dump_dict['fitting_type'] = self.fft_fit.type \n dump_dict['nk_plot'] = self.fft_fit.nk_plot\n # full file name of the file with manipulator dump\n filename=tdc_Filenames.get_full_vis_filename(dump_id, filename+'.pickle')\n pickle.dump( dump_dict, open(filename,'w') )\n print '\\nContent dumped in \"%s\" \\n' % filename", "def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)", "def saveData(self):\n\n data = super(OSSMouthGuide, self).saveData()\n\n # this should live in the GuideClass - also should considere Inherited Types\n data = self.saveAllObjectData(data, \"Control\")\n data = self.saveAllObjectData(data, \"Transform\")\n\n return data", "def output_cache(cc):\n\n out_file = os.path.join(cc.scene_dir, 'output', cc.scene_id+'_pickle')\n\n if cc.atmo_src == 'narr':\n out_file += '_narr'\n elif cc.atmo_src == 'merra':\n out_file += '_merra'\n\n with open(out_file, 'wb') as f:\n pickle.dump(cc, f)", "def draw_objects(self, view_manager):\n raise NotImplementedError(\"draw_objects can not be called directly from recoBase3D\")", "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, \"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance 
/ 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def draw(self):\n\n self.transform()\n\n node_object_age, proj_object_age = self._create_nodes_and_projections(nodetype='age')\n node_object_depth, proj_object_depth = self._create_nodes_and_projections(nodetype='depth')\n node_object_cond, proj_object_cond = self._create_nodes_and_projections(nodetype='condition')\n node_object_iter, proj_object_iter = self._create_nodes_and_projections(nodetype='iterations')\n\n edge_object = go.Scatter3d(x = self.Xe,\n y = self.Ye,\n z = self.Ze,\n mode = 'lines',\n line = go.scatter3d.Line(color = 'rgb(75,75,75)',\n width = 2\n ),\n hoverinfo = 'none',\n name = 'edges'\n )\n\n min_x = min(self.df['x'])\n max_x = max(self.df['x'])\n min_y = min(self.df['y'])\n max_y = max(self.df['y'])\n\n optval_object = go.Scatter3d(x = [min_x, min_x, max_x, max_x, min_x],\n y = [min_y, max_y, max_y, min_y, min_y],\n z = [self.optval] * 5,\n mode = 'lines',\n line = go.scatter3d.Line(color = 'rgb(0,200,0)',\n width = 10\n ),\n hoverinfo = 'name+z',\n name = 'optimal value',\n opacity = 0.3\n )\n\n xaxis = go.layout.scene.XAxis(showticklabels=False, title='X', backgroundcolor='white', gridcolor='lightgray')\n yaxis = go.layout.scene.YAxis(showticklabels=False, title='Y', backgroundcolor='white', gridcolor='lightgray')\n zaxis = go.layout.scene.ZAxis(title='objective value', backgroundcolor='white', 
gridcolor='lightgray')\n scene = go.layout.Scene(xaxis=xaxis, yaxis=yaxis, zaxis=zaxis)\n title = 'TreeD for instance '+self.probname+', generated with '+self.scipversion if self.title else ''\n\n layout = go.Layout(title=title,\n font=dict(size=self.fontsize),\n autosize=True,\n # width=900,\n # height=600,\n showlegend=self.showlegend,\n hovermode='closest',\n scene=scene\n )\n\n updatemenus=list([\n dict(\n buttons=list([\n dict(\n args=['visible', [True, True, False, False, False, False, False, False, True, True]],\n label='Node Age',\n method='restyle'\n ),\n dict(\n args=['visible', [False, False, True, True, False, False, False, False, True, True]],\n label='Tree Depth',\n method='restyle'\n ),\n dict(\n args=['visible', [False, False, False, False, True, True, False, False, True, True]],\n label='LP Condition (log 10)',\n method='restyle'\n ),\n dict(\n args=['visible', [False, False, False, False, False, False, True, True, True, True]],\n label='LP Iterations',\n method='restyle'\n )\n ]),\n direction = 'down',\n showactive = True,\n type = 'buttons',\n # x = 1.2,\n # y = 0.6,\n ),\n ])\n\n # annotations = list([\n # dict(text='Color Mode:', showarrow=False, x=1.2, y=0.6, )\n # ])\n # layout['annotations'] = annotations\n layout['updatemenus'] = updatemenus\n\n data = [node_object_age, proj_object_age,\n node_object_depth, proj_object_depth,\n node_object_cond, proj_object_cond,\n node_object_iter, proj_object_iter,\n edge_object, optval_object]\n self.fig = go.FigureWidget(data = data, layout = layout)\n\n nicefilename = layout.title.text.replace(' ', '_')\n nicefilename = nicefilename.replace('\"', '')\n nicefilename = nicefilename.replace(',', '')\n\n if not self.use_iplot:\n plot(self.fig, filename = nicefilename + '.html', show_link=False)\n\n # generate html code to include into a website as <div>\n self.div = plot(self.fig, filename = nicefilename + '.html', show_link=False, include_plotlyjs=self.include_plotlyjs, output_type='div')", "def generate_data_mayavi(self):\n from enthought.mayavi.sources.api import ParametricSurface\n from enthought.mayavi.modules.api import Outline, Surface \n from enthought.mayavi.filters.api import WarpVector\n from enthought.mayavi.sources.vtk_data_source import VTKDataSource\n from enthought.tvtk.api import tvtk\n from numpy import array\n e = self.scene.engine\n# s = ParametricSurface()\n# e.add_source(s)\n# e.add_module(Outline())\n# e.add_module(Surface())\n # The numpy array data.\n #points = array([[0,0,0], [1,0,0], [0,1,0], [0,0,1]], 'f')\n points = array([[0,0,0], [1,0,0], [1,1,0], [0,1,0]], 'f')\n warp = array([[0,0,0], [100,0,0], [1,1,0], [0,1,0]])\n deformation = tvtk.DoubleArray()\n deformation.number_of_components = 3\n deformation.number_of_tuples = 4\n deformation.set_tuple3(0,0.,0.,0)\n deformation.set_tuple3(1,20.,-5.,0.)\n deformation.set_tuple3(2,15.,3.,0.)\n deformation.set_tuple3(3,-4.,2.,0)\n #triangles = array([[0,1,3], [0,3,2], [1,2,3], [0,2,1]])\n triangles = array([[0,1,2,3]])\n temperature = array([10., 20., -20., 10.])\n # The TVTK dataset.\n mesh = tvtk.PolyData(points=points, polys=triangles)\n #mesh = tvtk.UnstructuredGrid(points=points)\n #cel_type = 7\n #mesh.set_cells(cel_type, triangles)\n #mesh.point_data.scalars = temperature\n #mesh.point_data.scalars.name = 'Temperature'\n mesh.point_data.vectors = warp\n src = VTKDataSource(data = mesh)\n e.add_source(src)\n e.add_filter(WarpVector())\n e.add_module(Outline())\n e.add_module(Surface())", "def import_object(self, scenegroup, new_mesh, materials=None, 
offset_x=128.0, offset_y=128.0,\n offset_z=20.0):\n logger.debug(\"import_object\")\n pos = parse_vector(scenegroup[\"position\"])\n scale = parse_vector(scenegroup[\"scale\"])\n\n\n\n obj = self.getcreate_object(scenegroup[\"id\"], scenegroup[\"asset\"], new_mesh)\n\n if not scenegroup['groupid'] == '00000000-0000-0000-0000-000000000000':\n parent = self.findWithUUID(scenegroup['groupid'])\n if not parent:\n # XXX should register\n pass\n else:\n obj.parent = parent\n\n self.apply_position(obj, pos)\n self.apply_rotation(obj, parse_vector(scenegroup[\"rotation\"]))\n self.apply_scale(obj, scale)\n self.set_uuid(obj, str(scenegroup[\"id\"]))\n\n\n # new_mesh properties have to be set here otherwise blender\n # can crash!!\n self.set_uuid(new_mesh, str(scenegroup[\"asset\"]))\n if materials:\n if bversion == 3:\n for mat in materials:\n new_mesh.materials.append(mat)\n else:\n new_mesh.materials = materials\n scene = self.get_current_scene()\n try:\n if hasattr(obj, '_obj'):\n scene.objects.link(obj._obj)\n else:\n scene.objects.link(obj)\n except RuntimeError:\n pass # object already in scene\n editor.set_loading_state(obj, 'OK')\n #new_mesh.update()\n #obj.makeDisplayList()\n #new_mesh.hasVertexColours(True) # for now we create them as blender does\n\n return obj", "def main():\n viewer = Viewer()\n\n # paramètre de transformation des paramètres\n #sol\n ground_size = 512\n ground_offset = 20\n\n #dinosaure\n characters_offset_x = 0\n characters_offset_y = -20\n characters_offset_z = 0\n characters_scale = 15\n characters_rotate_deg = 180\n\n #forêt\n forest_offset = -15\n forest_scale = 1.5\n\n #skybox\n Skysphere_scale = 3\n\n characters = Node(transform = translate(characters_offset_x, characters_offset_y, characters_offset_z) @ scale(characters_scale) @ rotate(axis=(0, 1, 0), angle = characters_rotate_deg))\n characters.add(*load_skinned(\"dino/Dinosaurus_roar.dae\"))\n\n forest = Node(transform = translate(0, forest_offset, 0) @ scale(forest_scale))\n forest.add(*load_textured(\"trees9/forest.obj\"))\n\n ground = Node(transform = translate(-ground_size>>1, ground_offset, -ground_size>>1))\n ground.add(sol(ground_size))\n\n Skysphere = Node(transform = scale(Skysphere_scale))\n Skysphere.add(*load_textured(\"Skysphere/skysphere.obj\"))\n\n scene = Node(transform = identity(), children = [characters, forest, ground, Skysphere])\n\n viewer.add(scene)\n\n viewer.run()", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def generate_scene(self, n: int = 3, spawnbox: Optional[str] = None) -> List[bpy.types.Object]:\r\n locs = get_spawn_locs(n, spawnbox)\r\n rots = get_euler_rotations(n)\r\n\r\n self.src_objects: Tuple[bpy.types.Object]\r\n src_samples = random.choices(self.src_objects, k=n)\r\n\r\n copies = []\r\n for obj, loc, rot in zip(src_samples, locs, rots):\r\n new_obj = obj.copy() # Creates a \"placeholder\" that links to same attributes as orignal\r\n new_obj.data = obj.data.copy() # VERY IMPORTANT, actually replaces important stuff\r\n\r\n ##################################\r\n ### Set object attributes here ###\r\n ##################################\r\n new_obj.location = loc\r\n new_obj.rotation_euler = rot # Treated as radians\r\n new_obj.scale *= np.random.normal(loc=cng.RAND_SCALE_MU, scale=cng.RAND_SCALE_STD)\r\n new_obj.show_bounds = True\r\n new_obj.show_name = False\r\n ##################################\r\n 
##################################\r\n\r\n # Link to target collection\r\n self.target_collection.objects.link(new_obj)\r\n copies.append(new_obj)\r\n\r\n return copies", "def write_viman(self, component_instance):\n # Get the id of the poster already created\n poster_id = self._poster_dict[component_instance.blender_obj.name]\n parent = component_instance.robot_parent\n\n scene = bge.logic.getCurrentScene()\n \n seen_objects = [obj['name'] for obj in component_instance.local_data['visible_objects']]\n\n i = 0\n for object_id in self.scene_object_list:\n\n try:\n t = time.time()\n tacq_sec = int(t)\n tacq_usec = int((t - tacq_sec) * 1000)\n ors_viman_poster.set_tacq(self.viman_data, i, tacq_sec, tacq_usec)\n \n if object_id in seen_objects:\n\n object = passive_objects.obj_from_label(object_id)\n\n position_3d = Transformation3d(object)\n logger.debug(\"VIMAN \" + object_id + \"(\" + object.name + \") is visible at \" + str(position_3d))\n ors_viman_poster.set_visible(self.viman_data, i, 1)\n _fill_world_matrix(self.viman_data, position_3d, i)\n _fill_robot_matrix(self.viman_data, parent, position_3d, i)\n else:\n ors_viman_poster.set_visible (self.viman_data, i, 0)\n \n \n # Write to the poster with the data for all objects\n posted = ors_viman_poster.real_post_viman_poster(poster_id, self.viman_data)\n except KeyError as detail:\n logger.debug(\"WARNING: Object %s not found in the scene\" % detail)\n pass\n posted = False\n\n i = i + 1", "def make_multi_object_scene(self):\n multi1 = Scene3D()\n box = self.objects[0]\n box.set_location(1, 0, 0)\n box.set_size(0.4, 0.4, 0.1)\n multi1.add_object(box)\n\n box = self.objects[1]\n box.set_location(-1, 0, 0)\n multi1.add_object(box)\n\n self.scenes.append(multi1)", "def make_multi_object_scene(self):\n multi1 = Scene3D()\n box = self.objects[0]\n box.set_location(1, 0, 0)\n box.set_size(0.4, 0.4, 0.1)\n multi1.add_object(box)\n\n box = self.objects[1]\n box.set_location(-1, 0, 0)\n multi1.add_object(box)\n\n self.scenes.append(multi1)", "def exportAssetAssembly(name, rigTopNode, meshTopNode, path, postScript=None):\n if pm.ls(rigTopNode):\n rigTopNode = pm.PyNode(rigTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check your \"\n \"scene\".format(rigTopNode))\n return\n\n if pm.ls(meshTopNode):\n meshTopNode = pm.PyNode(meshTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check \"\n \"your scene\".format(meshTopNode))\n return\n # check the folder and script\n # if the target name exist abort and request another name\n\n deformer_jnts = rigTopNode.rigGroups[3].connections()[0].members()\n if not deformer_jnts:\n pm.displayError(\n \"{} is empty. 
The tool can't find any joint\".format(meshTopNode))\n\n # export connections and cut joint connections\n file_path = os.path.join(path, name + \".jmm\")\n dm_nodes = exportConnections(source=deformer_jnts,\n filePath=file_path,\n disc=True)\n\n # cut al possible remaining connection and adjust hierarchy\n # joint or visibility\n jnt_org = pm.PyNode(\"jnt_org\")\n pm.disconnectAttr(rigTopNode.jnt_vis, jnt_org.visibility)\n\n # restructure model\n model = pm.createNode(\"transform\",\n n=\"model\",\n p=None,\n ss=True)\n pm.addAttr(model, ln=\"rigGroups\", at='message', m=1)\n pm.parent(meshTopNode, jnt_org, model)\n\n # disconnect jnt set\n sets = rigTopNode.listConnections(type=\"objectSet\")\n\n deformersGrp = None\n for oSet in sets:\n if \"deformers_grp\" in oSet.name():\n deformersGrp = oSet\n\n if deformersGrp:\n for cnx in deformersGrp.message.listConnections(p=True):\n pm.disconnectAttr(deformersGrp.message, cnx)\n pm.connectAttr(deformersGrp.message, model.attr(\"rigGroups[0]\"))\n\n # disconnect bindPoses\n dg_poses = rigTopNode.message.listConnections(type=\"dagPose\", p=True)\n for dgp in dg_poses:\n if dgp.node().name().startswith(\"bindPose\"):\n pm.disconnectAttr(rigTopNode.message, dgp)\n\n # post script\n if postScript:\n try:\n exec(compile(open(postScript, \"rb\").read(), postScript, 'exec'))\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n pm.displayError(message)\n cont = pm.confirmBox(\"FAIL: Script Fail\",\n \"Do you want to export anyway?\" + \"\\n\\n\"\n + message + \"\\n\\n\" + traceback.format_exc(),\n \"Continue\", \"Cancel\")\n if not cont:\n pm.undo()\n return\n\n # export rig model\n pm.select(dm_nodes, r=True)\n pm.select(rigTopNode, add=True)\n file_path = os.path.join(path, name + \"_rig.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)\n\n # export mesh and joints\n pm.select(model, r=True)\n file_path = os.path.join(path, name + \"_model.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n 
self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def export_data(self):\n return self.export_all_data()", "def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return", "def run_dataset(data: DataSetBase) -> None:\n\n tracks_manager = data.load_tracks_manager()\n reconstructions = data.load_reconstruction()\n\n all_shot_ids = set(tracks_manager.get_shot_ids())\n for r in reconstructions:\n for shot in r.shots.values():\n if shot.id in all_shot_ids:\n vertices, faces = mesh.triangle_mesh(shot.id, r, tracks_manager)\n shot.mesh.vertices = vertices\n shot.mesh.faces = faces\n\n data.save_reconstruction(\n reconstructions, filename=\"reconstruction.meshed.json\", minify=True\n )", "def dumpData(self,out):\n #--Get sizes and dump into dataIO\n out.packSub0('NAME',self.cellName)\n #--Hack: Insert data record if necessary\n for record in self.records:\n if record.name == 'DATA': break\n else:\n self.records.insert(0,SubRecord('DATA',0))\n #--Top Records\n for record in self.records:\n if record.name == 'DATA':\n record.setData(struct.pack('3i',self.flags,self.gridX,self.gridY))\n record.getSize()\n record.dump(out)\n #--Objects\n inTempObjects = False\n for object in self.getObjects().list():\n #--Begin temp objects?\n if not inTempObjects and (object in self.tempObjects):\n out.packSub('NAM0','i',len(self.tempObjects))\n inTempObjects = True\n (iMod,iObj,objId,objRecords) = object\n for record in objRecords:\n #--FRMR/NAME placeholder?\n if isinstance(record,Cell_Frmr):\n out.pack('4si','FRMR',4)\n out.write(struct.pack('i',iObj)[:3])\n out.pack('B',iMod)\n out.packSub0('NAME',objId)\n else:\n record.getSize()\n record.dump(out)\n #--End Records\n for endRecord in self.endRecords:\n endRecord.getSize()\n endRecord.dump(out)", "def __init__(self, scene: Scene):\n self.scene = scene", "def serialize(self):", "def output_data(self):\n pass", "def dynExport(*args, allObjects: bool=True, attribute: Union[AnyStr, List[AnyStr]]=\"\", format:\n AnyStr=\"\", maxFrame: time=None, minFrame: time=None, onlyUpdateParticles:\n bool=True, overSampling: int=0, path: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def get_part_data(self, object, is_preset=False):\n # Get Matrix Data\n 
ob_world_matrix = object.matrix_world\n # Bring the matrix from Blender Z-Up soace into standard Y-up space.\n mat_rot = mathutils.Matrix.Rotation(math.radians(-90.0), 4, 'X')\n obj_wm_offset = mat_rot @ ob_world_matrix\n # Retrieve Position, Up and At vectors.\n pos = obj_wm_offset.decompose()[0]\n up = utils.get_direction_vector(obj_wm_offset, direction_matrix=\"up\")\n at = utils.get_direction_vector(obj_wm_offset, direction_matrix=\"at\")\n\n # Build dictionary.\n data = {\n \"ObjectID\": \"^{0}\".format(object[\"ObjectID\"]),\n \"Position\": [pos[0], pos[1], pos[2]],\n \"Up\": [up[0], up[1], up[2]],\n \"At\": [at[0], at[1], at[2]]\n }\n\n # Add Particular information.\n if not is_preset:\n timestamp = object[\"Timestamp\"]\n user_data = object[\"UserData\"]\n data[\"Timestamp\"] = int(timestamp)\n data[\"UserData\"] = int(user_data)\n\n return data", "def make_simple_scenes(self):\n clown = Clown()\n clown.set_location( 0, 0, 0 )\n clown.set_size( 1, 1, 1 )\n self.objects.append( clown )\n\n clown1Scene = Scene3D()\n clown1Scene.add_object( clown )\n self.scenes.append( clown1Scene )\n\n head = Head()\n head.set_location( 0, 0, 0 )\n head.set_size( 1.5, 1.5, 1.5 )\n self.objects.append( head )\n\n headScene = Scene3D()\n headScene.add_object( head )\n self.scenes.append( headScene )\n\n hat = Hat()\n hat.set_location( 0, 0, 0 )\n hat.set_size( 1.5, 1.5, 1.5 )\n self.objects.append( hat )\n\n hat1Scene = Scene3D()\n hat1Scene.add_object( hat )\n self.scenes.append( hat1Scene )\n\n eye = Eye()\n eye.set_color(1, 0, 0)\n eye.set_location(0, 1, 1)\n eye.set_size(1.3, 1.3, 1.3)\n eye.set_rotate( 45, 1, 0, 0 )\n self.objects.append( eye )\n\n eye1Scene = Scene3D()\n eye1Scene.add_object( eye )\n self.scenes.append( eye1Scene )\n\n donut = Donut()\n donut.set_color(1, 0, 1 )\n donut.set_location( 0, 0, 0 )\n donut.set_size( 2.0, 2.0, 2.0 )\n donut.set_rotate( 45, 0, 1, 0)\n self.objects.append( donut )\n\n donut1Scene = Scene3D()\n donut1Scene.add_object( donut )\n self.scenes.append( donut1Scene )\n\n cone = Cone()\n cone.set_color( 1, 0, 1 )\n cone.set_location( 0, 0, 0 )\n cone.set_size( 2.0, 2.0, 2.0 )\n self.objects.append( cone )\n\n cone1Scene = Scene3D()\n cone1Scene.add_object( cone )\n self.scenes.append( cone1Scene )\n\n box1 = self.make_box(1, Color(1, 0, 1))\n self.objects.append( box1 )\n\n box1Scene = Scene3D()\n box1Scene.add_object( box1 )\n self.scenes.append( box1Scene )\n\n box2 = self.make_box( 1, Color(0, 1, 1 ))\n box2.set_rotate( 45, 0, 0, 1 )\n box2.set_size(2.0, 2.0, 2.0)\n self.objects.append( box2 )\n\n box2Scene = Scene3D()\n box2Scene.add_object( box2 )\n self.scenes.append( box2Scene )\n\n sp = self.make_ball(1, Color(0.8, 0.8, 0))\n sp.set_size(2.0, 2.0, 2.0)\n self.objects.append( sp )\n\n ballScene = Scene3D()\n ballScene.add_object( sp )\n self.scenes.append( ballScene )", "def _collect_scene_data(self, config):\n\n self._config = config\n self.scenes_root_path = config['scenes_root_path']\n assert(os.path.isdir(self.scenes_root_path))\n\n self._scene_dict = dict()\n # each one is a list of scenes\n self._all_image_paths = {\"train\": [], \"test\": []}\n\n for key, val in self._all_image_paths.items():\n for scene_collection_name in config[key]:\n scene_collection_dir = os.path.join(self.scenes_root_path, scene_collection_name)\n assert os.path.isdir(scene_collection_dir), scene_collection_dir\n # Scan all scenes in this scene dir\n for scene_name in os.listdir(scene_collection_dir):\n full = os.path.join(scene_collection_dir, scene_name)\n if 
os.path.isdir(full):\n val += self._get_all_rgb_image_paths_in_scene_dir(full)", "def save_world(self):\n pass" ]
[ "0.63344496", "0.62675095", "0.6227879", "0.6224553", "0.6189659", "0.6154672", "0.6018009", "0.59942466", "0.5959543", "0.5952904", "0.591055", "0.5901034", "0.588326", "0.5835366", "0.58250904", "0.5816034", "0.57584465", "0.57536954", "0.5716126", "0.57042253", "0.5703749", "0.56667525", "0.56217647", "0.56033295", "0.5602407", "0.5595913", "0.55618834", "0.5528604", "0.551677", "0.5508003", "0.55058825", "0.5504343", "0.5480369", "0.54794794", "0.54793483", "0.5478127", "0.5453202", "0.5453165", "0.5445105", "0.5442484", "0.5434739", "0.5420123", "0.5418141", "0.54136235", "0.5397722", "0.5391705", "0.53835446", "0.53636765", "0.53498197", "0.53478944", "0.5338076", "0.5338076", "0.53371066", "0.53284204", "0.5325644", "0.5311933", "0.53070813", "0.5301007", "0.52982855", "0.5297192", "0.5293307", "0.52903795", "0.5285061", "0.528384", "0.52826697", "0.5280214", "0.52765334", "0.5275085", "0.527237", "0.52694", "0.5267777", "0.5265228", "0.5262105", "0.5260106", "0.5259219", "0.5246919", "0.5242", "0.52415675", "0.52235484", "0.52122205", "0.52104145", "0.52070826", "0.51997167", "0.5197847", "0.5197847", "0.5196669", "0.5192379", "0.5192379", "0.5190151", "0.51895726", "0.5188732", "0.5187803", "0.5184238", "0.5180978", "0.5180489", "0.5178086", "0.51763403", "0.5171837", "0.5166415", "0.51663274" ]
0.73334426
0
export lights from scene
def exportLights(self):
    #TODO! REMOVE CONSTRAINS
    lights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )
    mc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )
    litsToExport = []
    for li in lights:
        finalLi = li.split( '|' )
        if len(finalLi) == 1:
            litsToExport.append( finalLi[0] )
        else:
            litsToExport.append( finalLi[1] )
    if litsToExport:
        mc.select( litsToExport, r=1, ne=1 )
        mc.file( self.lightPath.path, op="v=0", typ="mayaAscii", pr=1, es=1 )
        #export Light Linking
        self.exportLightLinking()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, \"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. 
Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def exportLightLinking(self):\n\t\tlights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a]\n\t\tallShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))]\n\t\tlitLinks = {}\n\t\tfor l in lights:\n\t\t\tlightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0)\n\t\t\tlitLinks[l]\t = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT\n\t\tpickle.dump( litLinks, open( self.lightLinkPath.path, \"wb\" ) )", "def flicker_lights(self):\n print 'Lights Set'", "async def lights(self, context):\n\n await random_image(context, 'lights')", "def lights(self):\n return list(self.GetLights())", "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)", "def testLighExport(self):\n\n archive = OArchive(\"light1.abc\")\n emptyLightObj = OLight(archive.getTop(), \"emptyLight\")\n lightObj = OLight(archive.getTop(), \"myLight\" )\n\n samp = CameraSample()\n lightObj.getSchema().setCameraSample( samp )\n\n samp = CameraSample( -0.35, 0.75, 0.1, 0.5 )\n lightObj.getSchema().getChildBoundsProperty().setValue(\n Box3d( V3d( 0.0, 0.1, 0.2 ), V3d( 0.3, 0.4, 0.5 ) ) )\n\n lightObj.getSchema().setCameraSample( samp )\n\n arg = lightObj.getSchema().getArbGeomParams()\n param = OFloatGeomParam( arg, \"test\", False, kConstantScope, 1 )\n user = lightObj.getSchema().getUserProperties()\n OFloatProperty( user, \"test\" )", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, 
text=\"Ogre3D (.scene and .mesh)\")\n return op", "def render_sample(latents, material_names, include_lights, output_filename, save_scene):\n\n # set output path\n bpy.context.scene.render.filepath = output_filename\n\n # set objects and lights\n update_objects_and_lights(latents, material_names, include_lights)\n\n rgba_background = colorsys.hsv_to_rgb(latents[9] / (2.0 * np.pi), 0.60, 1.0) + (\n 1.0,\n )\n render_utils.change_material(\n bpy.data.objects[\"Ground\"].data.materials[-1], Color=rgba_background\n )\n\n # set scene background\n bpy.ops.render.render(write_still=True)\n\n if save_scene:\n # just for debugging\n bpy.ops.wm.save_as_mainfile(\n filepath=f\"scene_{os.path.basename(output_filename)}.blend\"\n )", "def gl_lighting():\n for viewer in nuke.allNodes('Viewer'):\n val = int(viewer.knob('gl_lighting').getValue())\n viewer.knob('gl_lighting').setValue(not val)", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "async def Turn_On_Lights() -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": \"green\",\n }", "def lights(self) -> List[dict]:\n return 
self.items_by_domain(\"light\")", "def import_scene(file_path):\n\n pass", "def setup_lights(self, settings):\n\n for light in settings.lights: # for each light listed in yaml file\n lst = Light(light, settings.lights, settings) # create a Light instance with settings\n self.lights.append(lst) # add it to the list of lights", "def enableLighting(self):\r\n\t\t\r\n\t\tglEnable(GL_LIGHTING)", "def turnLightingSystemOn():\n dislin.light('ON')", "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n 
bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: 
vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False", "def turn_on_lights(bridge):\n for light in bridge.lights:\n bridge.set_light(light.light_id, {'ct': 350, 'bri': 254, 'on': True})", "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def export_mesh(remote, path):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_ExportMeshFile_CurrentSelection(path)\n remote.runCommand(cmd)", "def setup_scene_for_rgb_render(scene, outdir):\n # Use node rendering for python control\n scene.use_nodes = True\n tree = scene.node_tree\n links = tree.links\n\n # Make sure there are no existing nodes\n for node in tree.nodes:\n tree.nodes.remove(node)\n\n # Set up a renderlayer and plug it into our remapping layer\n inp = tree.nodes.new('CompositorNodeRLayers')\n\n if (bpy.app.version[1] >= 70): # Don't apply color transformation -- changed in Blender 2.70\n scene.view_settings.view_transform = 'Raw'\n scene.sequencer_colorspace_settings.name = 'Non-Color'\n\n # Save it out\n if outdir:\n out = tree.nodes.new('CompositorNodeOutputFile')\n ident = str(uu.uuid4())\n out.file_slots[0].path = ident\n out.base_path = outdir\n # out.format.color_mode = 'BW'\n # out.format.color_depth = settings.DEPTH_BITS_PER_CHANNEL\n out.format.color_mode = 'RGB'\n out.format.color_depth = settings.COLOR_BITS_PER_CHANNEL\n out.format.file_format = settings.PREFERRED_IMG_EXT.upper()\n links.new(inp.outputs[0], out.inputs[0])\n ext = 
utils.img_format_to_ext[settings.PREFERRED_IMG_EXT.lower()]\n temp_filename = \"{0}0001.{1}\".format(ident, ext)\n return os.path.join(outdir, temp_filename)\n else:\n out = tree.nodes.new('CompositorNodeComposite')\n links.new(inp.outputs[0], out.inputs[0])\n return None", "def render(self):\r\n \r\n # --------------------------------\r\n # Set world-level Panda properties\r\n # --------------------------------\r\n\r\n # Create Ambient Light 1\r\n ambientLight = AmbientLight( 'ambientLight_1' )\r\n ambientLight.setColor( Vec4( 0.2, 0.2, 0.2, 1 ) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, 50, 50)\r\n render.setLight(ambientLightNP)\r\n\r\n # Create Ambient Light 2\r\n ambientLight = AmbientLight( 'ambientLight_2' )\r\n ambientLight.setColor( Vec4(0.2, 0.2, 0.2, 1) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, -50, 50)\r\n render.setLight(ambientLightNP)\r\n# \r\n# # Directional light 01\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.8, 0.2, 0.2, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing backwards, towards the camera.\r\n# directionalLightNP.setHpr(180, 20, 0)\r\n# render.setLight(directionalLightNP)\r\n#\r\n# # Directional light 02\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.2, 0.2, 0.8, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing forwards, away from the camera.\r\n# directionalLightNP.setHpr(0, -20, 0)\r\n# render.setLight(directionalLightNP)\r\n\r\n #create a directional light\r\n #light = DirectionalLight('my dlight')\r\n\r\n #create a point light\r\n light = PointLight('plight')\r\n #light.setColor(VBase4(0.2, 0.2, 0.2, 1))\r\n\r\n #The following line doesn't work in Panda3D 1.7.0\r\n #lightPath = render.attachNewNode(light.upcastToPandaNode())\r\n\r\n lightPath = render.attachNewNode(light)\r\n lightPath.setPos( 10, 10, 10)\r\n\r\n #lightPath.lookAt(objPath)\r\n\r\n #illuminate all\r\n render.setLight(lightPath)\r\n #illuminate only objPath objects\r\n #objPath.setLight(lightPath)\r\n\r\n #self.SetMouseControls(objPath)\r\n #self.setKeyboardControls()\r\n \r\n taskMgr.add(self.mouseControlsTask, 'mouseControlsTask')\r\n #taskMgr.add(self.cameraMovementTask, 'cameraMovementTask') \r\n\r\n base.setBackgroundColor( .0, .0, .0 )\r\n\r\n #taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")\r\n #core.cmd.exeCommand(\"LoadEdge\", obj, file_name+self.WingedEdgeExtensions[0], file_name+self.WingedEdgeExtensions[1], file_name+self.WingedEdgeExtensions[2], file_name+self.WingedEdgeExtensions[3])\r\n #self.model = importer.loadFile(fileName)\r\n #if self.model is None:\r\n # print \"Unsupported file\"\r\n # return\r", "def writeObject(self,view,renderer):\n\n if not view.Source:\n return \"\"\n\n # point light hook\n proxy = getattr(view.Source,\"Proxy\",None)\n if getattr(proxy,\"type\",None) == \"PointLight\":\n return self.writePointLight(view,renderer)\n\n # get color and alpha\n mat = None\n color = None\n alpha = None\n if view.Material:\n mat = view.Material\n else:\n if \"Material\" in view.Source.PropertiesList:\n if view.Source.Material:\n mat = view.Source.Material\n if mat:\n if \"Material\" in mat.PropertiesList:\n if \"DiffuseColor\" in mat.Material:\n color = 
mat.Material[\"DiffuseColor\"].strip(\"(\").strip(\")\").split(\",\")[:3]\n if \"Transparency\" in mat.Material:\n if float(mat.Material[\"Transparency\"]) > 0:\n alpha = 1.0 - float(mat.Material[\"Transparency\"])\n else:\n alpha = 1.0\n\n if view.Source.ViewObject:\n if not color:\n if hasattr(view.Source.ViewObject,\"ShapeColor\"):\n color = view.Source.ViewObject.ShapeColor[:3]\n if not alpha:\n if hasattr(view.Source.ViewObject,\"Transparency\"):\n if view.Source.ViewObject.Transparency > 0:\n alpha = 1.0-(float(view.Source.ViewObject.Transparency)/100.0)\n if not color:\n color = (1.0, 1.0, 1.0)\n if not alpha:\n alpha = 1.0\n\n # get mesh\n mesh = None\n if hasattr(view.Source,\"Group\"):\n shps = [o.Shape for o in Draft.getGroupContents(view.Source) if hasattr(o,\"Shape\")]\n mesh = MeshPart.meshFromShape(Shape=Part.makeCompound(shps),\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Part::Feature\"):\n mesh = MeshPart.meshFromShape(Shape=view.Source.Shape,\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Mesh::Feature\"):\n mesh = view.Source.Mesh\n if not mesh:\n return \"\"\n\n return renderer.writeObject(view,mesh,color,alpha)", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def deleteAllModelsFromScene(self):\n #productive #onButton\n profprint()\n while slicer.util.getNodes('python-catch-round_*') != {}:\n nodes = slicer.util.getNodes('python-catch-round_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('manual-seg_*') != {}:\n nodes = slicer.util.getNodes('manual-seg_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('obturator-seg_*') != {}:\n nodes = slicer.util.getNodes('obturator-seg_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n #while slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode') !={}:\n # nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode')\n # for node in nodes.values():\n # slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('template slice position*') != {}:\n nodes = slicer.util.getNodes('template slice position*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\n if sYellow ==None :\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\n sYellow.SetSliceVisible(0)\n reformatLogic = slicer.vtkSlicerReformatLogic()\n reformatLogic.SetSliceNormal(sYellow,1,0,0)\n tempFidNodes = slicer.mrmlScene.GetNodesByName('Temp')\n for i in range(tempFidNodes.GetNumberOfItems()):\n node = tempFidNodes.GetItemAsObject(i)\n if node:\n slicer.mrmlScene.RemoveNode(node)\n sYellow.Modified()", "def lights(self, mask, inplace=True):\n # TODO: Fields in this methods have to be given by the user and not be static\n # TODO: use BasicEdit.create_object()\n\n lights = list()\n\n zone_names = self.get_field(mask, 'Name')\n\n for zone in zone_names:\n lights.append(list())\n lights[-1].append(\"Lights\") # Object type\n lights[-1].append(\"Lights {}\".format(zone)) # Object name\n lights[-1].append(zone) # Zone\n lights[-1].append(\"Lights Schedule {}\".format(zone)) # Schedule name\n lights[-1].append(\"Watts/Area\") # Design Level Calculation Method\n lights[-1].append(\"\") # Lighting Level {W}\n lights[-1].append(\"2.9\") # Watts per Zone 
Floor Area {W/m2}\n lights[-1].append(\"\") # Watts per Person {W/person}\n lights[-1].append(\"\") # Return Air Fraction\n lights[-1].append(\"\") # Fraction Radiant\n lights[-1].append(\"\") # Fraction Visible\n lights[-1].append(\"\") # Fraction Replaceable\n\n return lights", "async def Rainbow_Lights():\n busylightapi.manager.apply_effect_to_light(ALL_LIGHTS, rainbow)\n return {\n \"action\": \"effect\",\n \"name\": \"rainbow\",\n \"light_id\": \"all\",\n }", "def export_world(file, world, scene, global_matrix, tab_write):\n render = scene.pov\n agnosticrender = scene.render\n camera = scene.camera\n # matrix = global_matrix @ camera.matrix_world # view dependant for later use NOT USED\n if not world:\n return\n\n # These lines added to get sky gradient (visible with PNG output)\n\n # For simple flat background:\n if not world.pov.use_sky_blend:\n # No alpha with Sky option:\n if render.alpha_mode == \"SKY\" and not agnosticrender.film_transparent:\n tab_write(\n file, \"background {rgbt<%.3g, %.3g, %.3g, 0>}\\n\" % (world.pov.horizon_color[:])\n )\n\n elif render.alpha_mode == \"STRAIGHT\" or agnosticrender.film_transparent:\n tab_write(\n file, \"background {rgbt<%.3g, %.3g, %.3g, 1>}\\n\" % (world.pov.horizon_color[:])\n )\n else:\n # Non fully transparent background could premultiply alpha and avoid\n # anti-aliasing display issue\n tab_write(\n file,\n \"background {rgbft<%.3g, %.3g, %.3g, %.3g, 0>}\\n\"\n % (\n world.pov.horizon_color[0],\n world.pov.horizon_color[1],\n world.pov.horizon_color[2],\n render.alpha_filter,\n ),\n )\n\n world_tex_count = 0\n # For Background image textures\n for t in world.pov_texture_slots: # risk to write several sky_spheres but maybe ok.\n if t:\n tex = bpy.data.textures[t.texture]\n if tex.type is not None:\n world_tex_count += 1\n # XXX No enable checkbox for world textures yet (report it?)\n # if t and tex.type == 'IMAGE' and t.use:\n if tex.type == \"IMAGE\":\n image_filename = path_image(tex.image)\n if tex.image.filepath != image_filename:\n tex.image.filepath = image_filename\n if image_filename != \"\" and t.use_map_blend:\n textures_blend = image_filename\n # colvalue = t.default_value\n t_blend = t\n\n # Commented below was an idea to make the Background image oriented as camera\n # taken here:\n # http://news.pov.org/pov.newusers/thread/%3Cweb.4a5cddf4e9c9822ba2f93e20@news.pov.org%3E/\n # Replace 4/3 by the ratio of each image found by some custom or existing\n # function\n # mapping_blend = (\" translate <%.4g,%.4g,%.4g> rotate z*degrees\" \\\n # \"(atan((camLocation - camLookAt).x/(camLocation - \" \\\n # \"camLookAt).y)) rotate x*degrees(atan((camLocation - \" \\\n # \"camLookAt).y/(camLocation - camLookAt).z)) rotate y*\" \\\n # \"degrees(atan((camLocation - camLookAt).z/(camLocation - \" \\\n # \"camLookAt).x)) scale <%.4g,%.4g,%.4g>b\" % \\\n # (t_blend.offset.x / 10 , t_blend.offset.y / 10 ,\n # t_blend.offset.z / 10, t_blend.scale.x ,\n # t_blend.scale.y , t_blend.scale.z))\n # using camera rotation valuesdirectly from blender seems much easier\n if t_blend.texture_coords == \"ANGMAP\":\n mapping_blend = \"\"\n else:\n # POV-Ray \"scale\" is not a number of repetitions factor, but its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # UV scale is 0.5,0.5 in blender and 0,0 in POV\n # Further Scale by 2 and translate by -1 are\n # required for the sky_sphere not to repeat\n\n mapping_blend = (\n \"scale 2 scale <%.4g,%.4g,%.4g> translate -1 \"\n \"translate 
<%.4g,%.4g,%.4g> rotate<0,0,0> \"\n % (\n (1.0 / t_blend.scale.x),\n (1.0 / t_blend.scale.y),\n (1.0 / t_blend.scale.z),\n 0.5 - (0.5 / t_blend.scale.x) - t_blend.offset.x,\n 0.5 - (0.5 / t_blend.scale.y) - t_blend.offset.y,\n t_blend.offset.z,\n )\n )\n\n # The initial position and rotation of the pov camera is probably creating\n # the rotation offset should look into it someday but at least background\n # won't rotate with the camera now.\n # Putting the map on a plane would not introduce the skysphere distortion and\n # allow for better image scale matching but also some waay to chose depth and\n # size of the plane relative to camera.\n tab_write(file, \"sky_sphere {\\n\")\n tab_write(file, \"pigment {\\n\")\n tab_write(\n file,\n 'image_map{%s \"%s\" %s}\\n'\n % (image_format(textures_blend), textures_blend, img_map_bg(t_blend)),\n )\n tab_write(file, \"}\\n\")\n tab_write(file, \"%s\\n\" % mapping_blend)\n # The following layered pigment opacifies to black over the texture for\n # transmit below 1 or otherwise adds to itself\n tab_write(file, \"pigment {rgb 0 transmit %s}\\n\" % tex.intensity)\n tab_write(file, \"}\\n\")\n # tab_write(file, \"scale 2\\n\")\n # tab_write(file, \"translate -1\\n\")\n\n # For only Background gradient\n\n if world_tex_count == 0 and world.pov.use_sky_blend:\n tab_write(file, \"sky_sphere {\\n\")\n tab_write(file, \"pigment {\\n\")\n # maybe Should follow the advice of POV doc about replacing gradient\n # for skysphere..5.5\n tab_write(file, \"gradient y\\n\")\n tab_write(file, \"color_map {\\n\")\n\n if render.alpha_mode == \"TRANSPARENT\":\n tab_write(\n file,\n \"[0.0 rgbft<%.3g, %.3g, %.3g, %.3g, 0>]\\n\"\n % (\n world.pov.horizon_color[0],\n world.pov.horizon_color[1],\n world.pov.horizon_color[2],\n render.alpha_filter,\n ),\n )\n tab_write(\n file,\n \"[1.0 rgbft<%.3g, %.3g, %.3g, %.3g, 0>]\\n\"\n % (\n world.pov.zenith_color[0],\n world.pov.zenith_color[1],\n world.pov.zenith_color[2],\n render.alpha_filter,\n ),\n )\n if agnosticrender.film_transparent or render.alpha_mode == \"STRAIGHT\":\n tab_write(file, \"[0.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\\n\" % (world.pov.horizon_color[:]))\n # aa premult not solved with transmit 1\n tab_write(file, \"[1.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\\n\" % (world.pov.zenith_color[:]))\n else:\n tab_write(file, \"[0.0 rgbt<%.3g, %.3g, %.3g, 0>]\\n\" % (world.pov.horizon_color[:]))\n tab_write(file, \"[1.0 rgbt<%.3g, %.3g, %.3g, 0>]\\n\" % (world.pov.zenith_color[:]))\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n # Sky_sphere alpha (transmit) is not translating into image alpha the same\n # way as 'background'\n\n # if world.pov.light_settings.use_indirect_light:\n # scene.pov.radio_enable=1\n\n # Maybe change the above to a function copyInternalRenderer settings when\n # user pushes a button, then:\n # scene.pov.radio_enable = world.pov.light_settings.use_indirect_light\n # and other such translations but maybe this would not be allowed either?\n\n # -----------------------------------------------------------------------------\n\n mist = world.mist_settings\n\n if mist.use_mist:\n tab_write(file, \"fog {\\n\")\n if mist.falloff == \"LINEAR\":\n tab_write(file, \"distance %.6f\\n\" % ((mist.start + mist.depth) * 0.368))\n elif mist.falloff in [\"QUADRATIC\", \"INVERSE_QUADRATIC\"]: # n**2 or squrt(n)?\n tab_write(file, \"distance %.6f\\n\" % ((mist.start + mist.depth) ** 2 * 0.368))\n tab_write(\n file,\n \"color rgbt<%.3g, %.3g, %.3g, %.3g>\\n\"\n % (*world.pov.horizon_color, (1.0 
- mist.intensity)),\n )\n # tab_write(file, \"fog_offset %.6f\\n\" % mist.start) #create a pov property to prepend\n # tab_write(file, \"fog_alt %.6f\\n\" % mist.height) #XXX right?\n # tab_write(file, \"turbulence 0.2\\n\")\n # tab_write(file, \"turb_depth 0.3\\n\")\n tab_write(file, \"fog_type 1\\n\") # type2 for height\n tab_write(file, \"}\\n\")\n if scene.pov.media_enable:\n tab_write(file, \"media {\\n\")\n tab_write(\n file,\n \"scattering { %d, rgb %.12f*<%.4g, %.4g, %.4g>\\n\"\n % (\n int(scene.pov.media_scattering_type),\n scene.pov.media_diffusion_scale,\n *(scene.pov.media_diffusion_color[:]),\n ),\n )\n if scene.pov.media_scattering_type == \"5\":\n tab_write(file, \"eccentricity %.3g\\n\" % scene.pov.media_eccentricity)\n tab_write(file, \"}\\n\")\n tab_write(\n file,\n \"absorption %.12f*<%.4g, %.4g, %.4g>\\n\"\n % (scene.pov.media_absorption_scale, *(scene.pov.media_absorption_color[:])),\n )\n tab_write(file, \"\\n\")\n tab_write(file, \"samples %.d\\n\" % scene.pov.media_samples)\n tab_write(file, \"}\\n\")", "def render_image(camera, scene, lights, nx, ny):\n # TODO A5 copy implementation from A4\n img = np.zeros((ny, nx, 3), np.float32)\n\n for x in range(0, nx):\n for y in range(0, ny):\n u = (x + 0.5) / nx\n v = (y + 0.5) / ny\n ray = camera.generate_ray((u, v))\n hit = scene.intersect(ray)\n img[y][x] = shade(ray, hit, scene, lights)\n\n return img", "def new_light():\n name = request.args.get('name')\n types = request.args.get('channels').split(\",\")\n all_lights[name] = types\n print name, all_lights[name]\n with open('data/lights.json', 'w') as f:\n f.write(json.dumps(all_lights))\n return json_back()\n return \"ERROR\"", "def _create_example_light():\n return Light({\"warning\": False, \"off\": True})", "def light_standby():\n for led in leds:\n led.on()\n\n rgb_driver.pulse(on_color=(scale[\"R\"], scale[\"G\"], scale[\"B\"]), off_color=(0,0,0))", "def nextLight():\n global light\n pin.setAllOutPinsLow()\n light += 1\n light %= len(traffic_lights)\n print traffic_colors[light]\n pin.setOutPinHigh(traffic_lights[light])", "def lightPath(self):\n\t\treturn mfl.mayaFile( self._path + '/lights.ma' )", "def draw_scene():\n # Place the camera\n camera.placeCamera()\n \n \n # Set up the global ambient light. (Try commenting out.)\n amb = [ 0*brightness, 0*brightness, 0*brightness, 1.0 ]\n glLightModelfv(GL_LIGHT_MODEL_AMBIENT, amb)\n\n # Set up the main light (LIGHT0)... or not.\n if is_light_on:\n place_blue_light()\n place_red_light()\n place_green_light()\n place_lamp_light()\n else:\n glDisable(GL_LIGHT0)\n glDisable(GL_LIGHT1)\n glDisable(GL_LIGHT2)\n glDisable(GL_LIGHT3)\n\n if lamp_light:\n place_lamp_light()\n else:\n glDisable(GL_LIGHT3)\n\n if headlamp_is_on:\n place_headlamp_light()\n else:\n glDisable(GL_LIGHT4)\n\n # Now spin the world around the y-axis (for effect).\n glRotated(angle_movement, 0, 1, 0)\n draw_objects()", "def importLights(self, asset = '', searchAndReplace = ['',''] ):\n\t\tif self.lightPath.exists:\n\t\t\tself.lightPath.imp()\n\t\t\tif self.lightLinkPath.exists:\n\t\t\t\tself.importLightLinking( asset, searchAndReplace )", "def get_light_list(self):\n return self.light_array", "def _on_unload_scene_shaders(self):\n\n artellapipe.ShadersMgr().unload_shaders()", "def export_rainbows(rainbows, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n pov_mat_name = \"Default_texture\"\n for ob in rainbows:\n povdataname = ob.data.name # enough? 
XXX not used nor matrix fn?\n angle = degrees(ob.data.spot_size / 2.5) # radians in blender (2\n width = ob.data.spot_blend * 10\n distance = ob.data.shadow_buffer_clip_start\n # eps=0.0000001\n # angle = br/(cr+eps) * 10 #eps is small epsilon variable to avoid dividing by zero\n # width = ob.dimensions[2] #now let's say width of rainbow is the actual proxy height\n # formerly:\n # cz-bz # let's say width of the rainbow is height of the cone (interfacing choice\n\n # v(A,B) rotates vector A about origin by vector B.\n # and avoid a 0 length vector by adding 1\n\n # file.write(\"#declare %s_Target= vrotate(<%.6g,%.6g,%.6g>,<%.4g,%.4g,%.4g>);\\n\" % \\\n # (povdataname, -(ob.location.x+0.1), -(ob.location.y+0.1), -(ob.location.z+0.1),\n # ob.rotation_euler.x, ob.rotation_euler.y, ob.rotation_euler.z))\n\n direction = ( # XXX currently not used (replaced by track to?)\n ob.location.x,\n ob.location.y,\n ob.location.z,\n ) # not taking matrix into account\n rmatrix = global_matrix @ ob.matrix_world\n\n # ob.rotation_euler.to_matrix().to_4x4() * mathutils.Vector((0,0,1))\n # XXX Is result of the below offset by 90 degrees?\n up = ob.matrix_world.to_3x3()[1].xyz # * global_matrix\n\n # XXX TO CHANGE:\n # formerly:\n # tab_write(file, \"#declare %s = rainbow {\\n\"%povdataname)\n\n # clumsy for now but remove the rainbow from instancing\n # system because not an object. use lamps later instead of meshes\n\n # del data_ref[dataname]\n tab_write(file, \"rainbow {\\n\")\n\n tab_write(file, \"angle %.4f\\n\" % angle)\n tab_write(file, \"width %.4f\\n\" % width)\n tab_write(file, \"distance %.4f\\n\" % distance)\n tab_write(file, \"arc_angle %.4f\\n\" % ob.pov.arc_angle)\n tab_write(file, \"falloff_angle %.4f\\n\" % ob.pov.falloff_angle)\n tab_write(file, \"direction <%.4f,%.4f,%.4f>\\n\" % rmatrix.translation[:])\n tab_write(file, \"up <%.4f,%.4f,%.4f>\\n\" % (up[0], up[1], up[2]))\n tab_write(file, \"color_map {\\n\")\n tab_write(file, \"[0.000 color srgbt<1.0, 0.5, 1.0, 1.0>]\\n\")\n tab_write(file, \"[0.130 color srgbt<0.5, 0.5, 1.0, 0.9>]\\n\")\n tab_write(file, \"[0.298 color srgbt<0.2, 0.2, 1.0, 0.7>]\\n\")\n tab_write(file, \"[0.412 color srgbt<0.2, 1.0, 1.0, 0.4>]\\n\")\n tab_write(file, \"[0.526 color srgbt<0.2, 1.0, 0.2, 0.4>]\\n\")\n tab_write(file, \"[0.640 color srgbt<1.0, 1.0, 0.2, 0.4>]\\n\")\n tab_write(file, \"[0.754 color srgbt<1.0, 0.5, 0.2, 0.6>]\\n\")\n tab_write(file, \"[0.900 color srgbt<1.0, 0.2, 0.2, 0.7>]\\n\")\n tab_write(file, \"[1.000 color srgbt<1.0, 0.2, 0.2, 1.0>]\\n\")\n tab_write(file, \"}\\n\")\n\n # tab_write(file, \"texture {%s}\\n\"%pov_mat_name)\n write_object_modifiers(ob, file)\n # tab_write(file, \"rotate x*90\\n\")\n # matrix = global_matrix @ ob.matrix_world\n # write_matrix(file, matrix)\n tab_write(file, \"}\\n\")\n # continue #Don't render proxy mesh, skip to next object", "def scenes_to_frames():\n # Scene 001 from frames 1-150\n cmd.scene('001', animate=0)\n cmd.mview('store', 1)\n cmd.mview('store', 150)\n # Scene 002 from frames 250-400\n cmd.scene('002', animate=0)\n cmd.mview('store', 250)\n cmd.mview('store', 400)", "def create_scene(self):\n \n self.scene=soya.World()", "def pov_render(self, camera_position = (0,0,-10), camera_target = (0,0,0)):\n\n \"\"\"\n f=pov.File(\"demo.pov\",\"colors.inc\",\"stones.inc\")\n \n cam = pov.Camera(location=camera_position, sky=(1,0,1),look_at=camera_target)\n light = pov.LightSource( camera_position, color=\"White\")\n \n povObjs = [cam, light]\n for obj in self.objects[1:]:\n # test coordinate transfroms\n # 
print M\n # vectors = np.array([[0,0,0,1], #origin\n # [1,0,0,1], # x\n # [0,1,0,1], # y\n # [0,0,1,1]]).transpose() # z\n # origin,x,y,z = (T*vectors).transpose()\n povObjs.append(povObj(obj))\n \n #print tuple(povObjs)\n f.write(*tuple(povObjs))\n f.close()\n #sphere1 = pov.Sphere( (1,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n #sphere2 = pov.Sphere( (0,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n # composite2 = None#pov.Difference(sphere1, sphere2)\n # \n \n \n \n \n \n # f.write( cam, composite2, light )\n # f.close()\n subprocess.call(\"povray +H2400 +W3200 demo.pov\", shell=True)\n os.system(\"open demo.png\")\n \"\"\"", "def traffic_light_cb(self, msg):\n\n # Save the traffic light array\n self.lights = msg.lights", "def getScene():\n #print \"servers direct scenes are \",soya.IDLER.scenes[:]\n \n return soya.IDLER.scenes[0]", "def initialize_lights(self):\n\t\tfor light in OUTPUT.LIGHTS:\n\t\t\tif light != -1:\n\t\t\t\tio.set_bit(light, 0)\n\t\tfor order in self.orderQueue.yield_orders(exclude=(None,)):\n\t\t\tself.set_button_light(order.floor, OUTPUT.IN_LIGHTS, 1)", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def set_button_light(self, floor, lights, value):\n\t\tif lights[floor] != -1:\n\t\t\tio.set_bit(lights[floor], value)", "def toggle_lights(bridge):\n if check_any_light_on(bridge):\n turn_off_lights(bridge)\n else:\n turn_on_lights(bridge)", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def lightsOn(strip, interval):\r\n clearStrip(strip)\r\n print(\"lightsOn\", strip, interval)\r\n fade(LED_COLOR_OFF, LED_COLOR_FULL, STEPS, interval, strip)", "def lights_on(self) -> list:\n return [\n entity for entity in self.all_lights if self.hass.get_state(entity) == \"on\"\n ]", "def get_light_sky(filenames, onoff=True):\n return filter_filenames(filenames, [\"light-off\"], onoff)", "def build_light(self, item):\n\n # Validete NMS object.\n if \"ObjectID\" not in item:\n return\n\n # Get object id from item.\n object_id = item[\"ObjectID\"]\n # Find light data\n if object_id not in self.lights_dictionary:\n return\n\n # Build Lights\n light_information = self.lights_dictionary[object_id]\n for idx, light_values in enumerate(light_information.values()):\n # Get Light Properties.\n light_type = light_values[\"type\"]\n light_location = light_values[\"location\"]\n\n # Create light.\n light = bpy.ops.object.light_add(\n type=light_type.upper(),\n location=light_location\n )\n light = bpy.context.object\n light[\"NMS_LIGHT\"] = True\n light.name = \"{0}_light{1}\".format(item.name, idx)\n data_copy = deepcopy(light_values)\n\n # Remove invalid blender properties.\n data_copy.pop(\"type\")\n data_copy.pop(\"location\")\n\n # Apply all other properties to blender object.\n for key, value in data_copy.items():\n if isinstance(value, list):\n value = mathutils.Vector(tuple(value))\n setattr(light.data, key, value)\n\n # Parent to object.\n utils.parent(light, item)\n\n # Disable Selection.\n light.hide_viewport = True\n light.hide_select = True", "def export_image(self, name):\n\t\tred = Color(\"red\")\n\t\tblue = Color(\"blue\")\n\t\twhite = Color(\"white\")\n\t\tblack = Color(\"black\")\n\t\tgold = Color(\"gold\")\n\t\trgb_gold = []\n\t\tfor part in gold.rgb:\n\t\t\tpart = part * 
255\n\t\t\trgb_gold.append(part)\n\t\trgb_black = []\n\t\tfor part in black.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_black.append(part)\n\t\trgb_white = []\n\t\tfor part in white.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_white.append(part)\n\t\tcolours = list(red.range_to(blue, int(self.grains)))\n\t\timage = np.zeros([self.space.shape[1],self.space.shape[0], 3], dtype=np.uint(8))\n\t\tfor grain in range(self.grains+1):\n\t\t\trgb = []\n\t\t\tfor part in colours[grain-1].rgb:\n\t\t\t\tpart = part * 255\n\t\t\t\trgb.append(part)\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state == grain:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb\n\t\t\t\tif cell.state == 999:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb_black\n\t\t\t\tif cell.state == 500:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb_gold\n\t\timg = Image.fromarray(image.astype('uint8'))\n\t\timg = img.resize((self.space.shape[1]*3,self.space.shape[0]*3))\n\t\timg.save('./static/temp/'+str(name)+'.png')", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = 
outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def WriteImport(self, filename, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):\r\n step = os.path.basename(outputDir)\r\n execution = os.path.basename(os.path.dirname(outputDir))\r\n test = os.path.basename(os.path.dirname(os.path.dirname(outputDir)))\r\n path = os.path.join(self.__scenesDir, test, execution, step)\r\n if (not os.path.isdir(path)):\r\n os.makedirs(path)\r\n self.__pathMap.append((path, outputDir))\r\n \r\n self.__logFiles.append(os.path.join(path, os.path.basename(logname)))\r\n self.__importLogFiles.append(self.__logFiles[-1])\r\n \r\n command = (\"SetValue \\\"preferences.scripting.cmdlogfilename\\\", \\\"\" + \r\n self.__logFiles[-1].replace(\"\\\\\", \"\\\\\\\\\") + \"\\\"\\n\"\r\n \"NewScene, false\\n\")\r\n if (FUtils.GetExtension(filename) == \"dae\"):\r\n command = (command + \r\n \"set myIProp = CreateImportFTKOptions()\\n\" +\r\n \"myIProp.Parameters(\\\"Filename\\\").Value = \\\"\" + \r\n filename.replace(\"\\\\\", \"\\\\\\\\\") +\"\\\"\\n\" +\r\n \"myIProp.Parameters(\\\"Verbose\\\").Value = True\\n\")\r\n for setting in settings:\r\n value = setting.GetValue().strip()\r\n if (value == \"\"):\r\n value = self.FindDefault(FXsi.__IMPORT_OPTIONS, \r\n setting.GetPrettyName())\r\n command = (command + \"myIProp.Parameters(\\\"\" + \r\n setting.GetCommand() + \"\\\").Value = \" + value + \"\\n\")\r\n command = command + \"ImportFTK myIProp.Name \\n\"\r\n elif (FUtils.GetExtension(filename) == \"scn\"):\r\n command = (command +\r\n \"OpenScene \\\"\" + filename.replace(\"\\\\\",\"\\\\\\\\\") + \"\\\"\\n\")\r\n else: \r\n return\r\n \r\n self.__currentImportProperName = FUtils.GetProperFilename(filename)\r\n basename = self.__currentImportProperName + \".scn\"\r\n\r\n# self.__script.write(\r\n# command +\r\n# \"SearchAndReplacePath \\\"All\\\", \\\"\" + FXsi.__REPLACE_PATH + \r\n# \"\\\", \\\"\" + \r\n# os.path.dirname(filename).replace(\"\\\\\", \"\\\\\\\\\") + \r\n# \"\\\", True\\n\" +\r\n# \"SaveSceneAs \\\"\" + \r\n# os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") +\r\n# \"\\\"\\n\"\r\n# )\r\n \r\n self.__script.write(\r\n command +\r\n \"SaveSceneAs \\\"\" + \r\n os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") +\r\n \"\\\"\\n\"\r\n )\r\n \r\n self.__testCount = self.__testCount + 1\r\n \r\n return [basename,]", "def define_materials():\n global robot\n robot.add_material(ur.Material('Black', ur.Color(0.1, 0.1, 0.1, 1)))\n robot.add_material(ur.Material('LightGrey', ur.Color(0.9, 0.9, 0.9, 1)))\n robot.add_material(ur.Material('Grey', ur.Color(0.6, 0.6, 0.6, 1)))\n robot.add_material(ur.Material('DarkGrey', ur.Color(0.3, 0.3, 0.3, 1)))", "def __render_scene(self, scene):\n\n # Name and location of the exported project.\n project_dir = os.path.join(tempfile.gettempdir(), \"blenderseed\", \"render\")\n project_filepath = os.path.join(project_dir, \"render.appleseed\")\n\n # Create target directories if necessary.\n if not 
os.path.exists(project_dir):\n try:\n os.makedirs(project_dir)\n except os.error:\n self.report({\"ERROR\"}, \"The directory {0} could not be created. Check directory permissions.\".format(project_dir))\n return\n\n # Generate project on disk.\n self.update_stats(\"\", \"appleseed Rendering: Exporting Scene\")\n writer = projectwriter.Writer()\n writer.write(scene, project_filepath)\n\n # Render project.\n self.__render_project_file(scene, project_filepath, project_dir)", "def test_light_interface(light_name='head_green_light'):\n l = Lights()\n rospy.loginfo(\"All available lights on this robot:\\n{0}\\n\".format(\n ', '.join(l.list_all_lights())))\n rospy.loginfo(\"Blinking Light: {0}\".format(light_name))\n on_off = lambda x: 'ON' if l.get_light_state(x) else 'OFF'\n rospy.loginfo(\"Initial state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn off light\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # reset output\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"Final state: {0}\".format(on_off(light_name)))", "def demo(self):\n self.clear()\n\n white = neo.Color(255, 255, 255)\n black = neo.Color(0, 0, 0)\n red = neo.Color(120, 0, 0)\n green = neo.Color(0, 255, 0)\n blue = neo.Color(0, 0, 255)\n pink = neo.Color(255, 102, 178)\n \n state = [[[0,0,0]] * self.width] * self.height\n stepsize = (1.0/self.n_leds)\n lednr = 0\n for x in range(self.width):\n for y in range(self.height):\n h_start = (0 + lednr * (2*stepsize)) % 1 #* (y*self.width + x)\n lednr = lednr + 1\n s_start = 0\n v_start = 1\n hsv = [h_start,s_start,v_start]\n state[x][y] = hsv\n self.set([x,y], hsv_to_neopixel_color(hsv[0], hsv[1], hsv[2]))\n\n tint = 0\n while(True): \n for x in range(self.width):\n for y in range(self.height):\n hsv = state[x][y]\n\n new_h = (hsv[0] + stepsize/60.0) % 1.0\n new_s = (hsv[1] + stepsize/20.0) % 1.0\n new_v = hsv[2] #+ stepsize/20.0) % 1.0\n\n state[x][y][0] = new_h\n state[x][y][1] = new_h\n state[x][y][2] = new_v\n\n self.set([x,y], hsv_to_neopixel_color(\n (translate(new_h, 0.0, 1.0, 0.0, 0.1) + tint) % 1.0, \n to_sine(new_s), \n new_v))\n \n tint = (tint + stepsize/20.0) % 1\n\n self.draw()\n sleep(1.0/40)", "def get_rgb_light():\n return list(light.rgb())", "def ControlLights(state):\n for led in (RED,YELLOW,GREEN):\n GPIO.output(LED[led],state[led])\n time.sleep(FLASH_TIME)", "def writePointLight(self,view,renderer):\n # get location, color, power\n pl = view.Source.PropertiesList\n\n try:\n location = view.Source.Location\n color = view.Source.Color\n except AttributeError:\n FreeCAD.Console.PrintError(translate(\"Render\",\"Cannot render Point Light: Missing location and/or color attributes\"))\n return \"\"\n power = getattr(view.Source,\"Power\",60) # We accept missing Power (default value: 60)...\n\n # send everything to renderer module\n return renderer.writePointLight(view,location,color,power)", "def _set_backpack_led(self, msg):\n # setup color as integer values\n color = [int(x * 255) for x in [msg.r, msg.g, msg.b, msg.a]]\n # create lights object with duration\n light = cozmo.lights.Light(cozmo.lights.Color(rgba=color), on_period_ms=1000)\n # set lights\n self._cozmo.set_all_backpack_lights(light)", "def 
testExportDisplayColorShading(self):\n # Validate the displayColor on the mesh prim.\n cubePrim = self._stage.GetPrimAtPath('/RedCube/Geom/Cube')\n self.assertTrue(cubePrim)\n\n cubeMesh = UsdGeom.Mesh(cubePrim)\n self.assertTrue(cubeMesh)\n\n meshDisplayColors = cubeMesh.GetDisplayColorPrimvar().Get()\n self.assertEqual(len(meshDisplayColors), 1)\n self.assertTrue(Gf.IsClose(meshDisplayColors[0], \n mayaUsdLib.ConvertMayaToLinear(self.RED_COLOR), \n 1e-6))\n\n # Validate the Material prim bound to the Mesh prim.\n materialBindingAPI = UsdShade.MaterialBindingAPI(cubePrim)\n material = materialBindingAPI.ComputeBoundMaterial()[0]\n self.assertTrue(material)\n materialPath = material.GetPath().pathString\n self.assertEqual(materialPath, '/RedCube/Materials/RedLambertSG')\n\n materialInputs = material.GetInputs()\n self.assertEqual(len(materialInputs), 3)\n\n materialInput = material.GetInput('displayColor')\n matDisplayColor = materialInput.Get()\n self.assertTrue(Gf.IsClose(matDisplayColor,\n mayaUsdLib.ConvertMayaToLinear(self.RED_COLOR), \n 1e-6))\n\n # Just verify that displayOpacity and transparency exist.\n materialInput = material.GetInput('displayOpacity')\n self.assertTrue(materialInput)\n\n materialInput = material.GetInput('transparency')\n self.assertTrue(materialInput)\n\n # Validate the surface shader that is connected to the material.\n materialOutputs = material.GetOutputs()\n self.assertEqual(len(materialOutputs), 4)\n print(self._stage.ExportToString())\n materialOutput = material.GetOutput('ri:surface')\n (connectableAPI, outputName, outputType) = materialOutput.GetConnectedSource()\n self.assertEqual(outputName, 'out')\n shader = UsdShade.Shader(connectableAPI)\n self.assertTrue(shader)\n self.assertEqual(shader.GetPrim().GetName(), 'RedLambertSG_lambert')\n\n shaderId = shader.GetIdAttr().Get()\n self.assertEqual(shaderId, 'PxrDiffuse')\n\n shaderInputs = shader.GetInputs()\n self.assertEqual(len(shaderInputs), 2)\n\n diffuseInput = shader.GetInput('diffuseColor')\n self.assertTrue(diffuseInput)\n (connectableAPI, outputName, outputType) = diffuseInput.GetConnectedSource()\n self.assertEqual(outputName, 'displayColor')\n self.assertTrue(connectableAPI)\n self.assertEqual(connectableAPI.GetPath().pathString, materialPath)\n\n transmissionInput = shader.GetInput('transmissionColor')\n self.assertTrue(transmissionInput)\n (connectableAPI, outputName, outputType) = transmissionInput.GetConnectedSource()\n self.assertEqual(outputName, 'transparency')\n self.assertTrue(connectableAPI)\n self.assertEqual(connectableAPI.GetPath().pathString, materialPath)", "def export_illumina(args):\n clarity_epp.export.illumina.update_samplesheet(lims, args.process_id, args.artifact_id, args.output_file)", "def save_and_reload_scene():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.save_and_reload_scene\")\n\n current_file = mc.file(save=True)\n flg.info(\"Current File: {}\".format(current_file))\n mc.file(current_file, ignoreVersion=True, open=True, force=True)", "def flicker_lights(self):\n if self.lighting:\n self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def flicker_lights(self):\n if self.lighting:\n self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif 
exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def __init__(self, scene = base.render, ambient = 0.2, hardness = 16, fov = 40, near = 10, far = 100):\n \n # Read and store the function parameters\n self.scene = scene\n self.__ambient = ambient\n self.__hardness = hardness\n \n # By default, mark every object as textured.\n self.flagTexturedObject(self.scene)\n \n # Create the buffer plus a texture to store the output in\n buffer = createOffscreenBuffer(-3)\n depthmap = Texture()\n buffer.addRenderTexture(depthmap, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)\n \n # Set the shadow filter if it is supported\n if(base.win.getGsg().getSupportsShadowFilter()):\n depthmap.setMinfilter(Texture.FTShadow)\n depthmap.setMagfilter(Texture.FTShadow) \n \n # Make the camera\n self.light = base.makeCamera(buffer)\n self.light.node().setScene(self.scene)\n self.light.node().getLens().setFov(fov)\n self.light.node().getLens().setNearFar(near, far)\n\n # Put a shader on the Light camera.\n lci = NodePath(PandaNode(\"lightCameraInitializer\"))\n lci.setShader(loader.loadShader(\"caster.sha\"))\n self.light.node().setInitialState(lci.getState())\n\n # Put a shader on the Main camera.\n mci = NodePath(PandaNode(\"mainCameraInitializer\"))\n mci.setShader(loader.loadShader(\"softshadow.sha\"))\n base.cam.node().setInitialState(mci.getState())\n\n # Set up the blurring buffers, one that blurs horizontally, the other vertically\n #blurXBuffer = makeFilterBuffer(buffer, \"Blur X\", -2, loader.loadShader(\"blurx.sha\"))\n #blurYBuffer = makeFilterBuffer(blurXBuffer, \"Blur Y\", -1, loader.loadShader(\"blury.sha\"))\n\n # Set the shader inputs\n self.scene.setShaderInput(\"light\", self.light)\n #self.scene.setShaderInput(\"depthmap\", blurYBuffer.getTexture())\n self.scene.setShaderInput(\"depthmap\", buffer.getTexture())\n self.scene.setShaderInput(\"props\", ambient, hardness, 0, 1)", "def list_scene(command):\n namespace = app.main(command)\n assert namespace.command == 'ls' or namespace.command == \"listscenes\"", "def lights(id, all, connect, info, action, bri):\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n\n if connect:\n # If the app is not registered and the button is not pressed,\n # press the button and call connect()\n # (this only needs to be run a single time)\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n else:\n click.secho(\"Already connected\", fg='green')\n\n return\n\n if info:\n # TODO: Print details of all lights\n click.secho('Light details', fg='green')\n for l in bridge.lights:\n\n click.secho(\n '\\t %d: %s is %s' % (l.light_id, l.name, get_state(l.on)),\n fg='green')\n\n if all:\n # TODO: Add api to Run action on all\n click.secho('TODO ADD: Run action on all', fg='green')\n for l in bridge.lights:\n action_on_light_by_id(bridge, l.light_id, action)\n\n else:\n if not valid_id(id):\n return\n action_on_light_by_id(bridge, int(id), action)", "def save_scene(force=True, **kwargs):\n\n pass", "def control_lights(state):\n for led in (RED, AMBER, GREEN):\n GPIO.output(LED[led],state[led])", "def _on_hires_assets(self):\n\n scene_assets = artellapipe.AssetsMgr().get_scene_assets()\n if not scene_assets:\n return\n\n for scene_asset in scene_assets:\n scene_asset.switch_to_hires()", "def index():\n global states\n with open('data/states.json', 'r') as f:\n states = 
json.loads(f.read())\n # COLORS: https://www.w3schools.com/w3css/w3css_colors.asp\n return render_template('UI.html', main_color=\"orange\", adresses=map(str, adresses), channels=channels, options=all_lights.keys(), states=states)", "def targets(self):\n self.renderer.begin_rendering(\"targets\")\n for target in self.targets:\n self.renderer.draw_rect_3d(target, 10, 10, True, self.renderer.blue())\n self.renderer.end_rendering()", "def set_up_scenes():\n cmd.zoom('Cathepsin', 10) # Zoom out to get a view on the whole complex\n cmd.scene('001', 'store', message='This is the first scene with a view on the complex!')\n cmd.set_view(closeup) # Get a close-up of the ligand by using the manually chosen viewpoint\n cmd.scene('002', 'store', message='This is the second scene with a close-up on the ligand!')", "def convert_shaders(self):\n raise NotImplementedError()", "def filterToLight( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n lit = int(255*HSL[2]) # convert to 0-255 range\n bmp.pixels[h][w] = (lit,lit,lit)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def reference_scene(file_path, **kwargs):\n\n pass", "def turnOffEnvirLights(self):\n c_offEnvirLights = self.layer.createCollection(\"c_OffEnvirLights\")\n c_offEnvirLights.getSelector().setFilterType(4)\n c_offEnvirLights.getSelector().setPattern(\"*LIGHTS_ENVIR*\")\n o_offEnvirLgtVisibility = c_offEnvirLights.createOverride(\"offEnvirLgtVisibility\", override.AbsOverride.kTypeId)\n \n lgtEnvGrp = str(cmds.ls(\"*LIGHTS_ENVIR*\")[0])\n \n o_offEnvirLgtVisibility.finalize(lgtEnvGrp+\".visibility\")\n o_offEnvirLgtVisibility.setAttrValue(0)", "def render(self):\n # Remove existing fresnel geometries from the scene\n for geometry in self._geometries:\n geometry.remove()\n\n # Clear the list of fresnel geometries\n self._geometries = []\n\n # Add fresnel scene geometries from plato scene primitives\n for prim in self._primitives:\n geometry = prim.render(self._fresnel_scene)\n self._geometries.append(geometry)\n\n # Set up the camera\n camera_up = rowan.rotate(rowan.conjugate(self.rotation), [0, 1, 0])\n camera_position = rowan.rotate(rowan.conjugate(self.rotation), -self.translation)\n camera_look_at = camera_position + rowan.rotate(rowan.conjugate(self.rotation), [0, 0, -1])\n camera_height = self.size[1]/self.zoom\n try:\n orthographic_camera = fresnel.camera.Orthographic\n except AttributeError:\n # Support fresnel < 0.13.0\n orthographic_camera = fresnel.camera.orthographic\n self._fresnel_scene.camera = orthographic_camera(\n position=camera_position,\n look_at=camera_look_at,\n up=camera_up,\n height=camera_height)\n\n # Set up lights\n lights = []\n if 'ambient_light' in self.enabled_features:\n config = self.get_feature_config('ambient_light')\n magnitude = config.get('value', 0.25)\n if magnitude > 0:\n lights.append(fresnel.light.Light(direction=(0, 0, 1),\n color=(magnitude, magnitude, magnitude),\n theta=np.pi))\n if 'directional_light' in self.enabled_features:\n config = self.get_feature_config('directional_light')\n directions = config.get('value', (.25, .5, -1))\n directions = np.atleast_2d(directions).astype(np.float32)\n for direction in directions:\n magnitude = np.linalg.norm(direction)\n if magnitude > 0:\n lights.append(fresnel.light.Light(direction=-direction,\n color=(magnitude, magnitude, magnitude),\n theta=0.7))\n if len(lights) > 0:\n self._fresnel_scene.lights = lights\n\n # Set up tracer\n if 'pathtracer' in 
self.enabled_features:\n # Use path tracer if enabled\n config = self.get_feature_config('pathtracer')\n tracer = self._path_tracer\n samples = config.get('samples', 64)\n def render_function(scene, **kwargs):\n return tracer.sample(scene, samples, **kwargs)\n else:\n # Use preview tracer by default\n tracer = self._preview_tracer\n tracer.anti_alias = 'antialiasing' in self.enabled_features\n render_function = tracer.render\n\n self._output = render_function(self._fresnel_scene)", "def init_gl(self):\n\n # default background color is white-ish\n background = [.99, .99, .99, 1.0]\n # if user passed a background color use it\n if 'background' in self.kwargs:\n try:\n # convert to (4,) uint8 RGBA\n background = to_rgba(self.kwargs['background'])\n # convert to 0.0 - 1.0 float\n background = background.astype(np.float64) / 255.0\n except BaseException:\n log.error('background color wrong!',\n exc_info=True)\n # apply the background color\n gl.glClearColor(*background)\n\n max_depth = (np.abs(self.scene.bounds).max(axis=1) ** 2).sum() ** .5\n max_depth = np.clip(max_depth, 500.00, np.inf)\n gl.glDepthRange(0.0, max_depth)\n\n gl.glClearDepth(1.0)\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glDepthFunc(gl.GL_LEQUAL)\n\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glEnable(gl.GL_LIGHTING)\n gl.glEnable(gl.GL_LIGHT0)\n gl.glEnable(gl.GL_LIGHT1)\n\n # put the light at one corner of the scenes AABB\n gl.glLightfv(gl.GL_LIGHT0,\n gl.GL_POSITION,\n rendering.vector_to_gl(np.append(self.scene.bounds[1], 0)))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_SPECULAR,\n rendering.vector_to_gl(.5, .5, 1, 1))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_DIFFUSE,\n rendering.vector_to_gl(1, 1, 1, .75))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_AMBIENT,\n rendering.vector_to_gl(.1, .1, .1, .2))\n\n gl.glColorMaterial(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT_AND_DIFFUSE)\n gl.glEnable(gl.GL_COLOR_MATERIAL)\n gl.glShadeModel(gl.GL_SMOOTH)\n\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_AMBIENT,\n rendering.vector_to_gl(0.192250, 0.192250, 0.192250))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_DIFFUSE,\n rendering.vector_to_gl(0.507540, 0.507540, 0.507540))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_SPECULAR,\n rendering.vector_to_gl(.5082730, .5082730, .5082730))\n\n gl.glMaterialf(gl.GL_FRONT,\n gl.GL_SHININESS,\n .4 * 128.0)\n\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)\n\n gl.glLineWidth(1.5)\n gl.glPointSize(4)", "async def Turn_On_Lights_With_Color(\n color: str = Path(..., title=\"Color name or hexadecimal string\")\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS, color)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": color,\n }", "def addNightLights(self,img,y):\n\t\t\n\t\tstartDate = ee.Date.fromYMD(y, 1, 1)\n\t\tendDate = ee.Date.fromYMD(y, 12, 31)\n\t\t\n\t\tif y < 2012:\n\t\t\n\t\t\tnightLights = ee.Image(ee.ImageCollection(\"NOAA/DMSP-OLS/NIGHTTIME_LIGHTS\").filterDate(startDate,endDate).mean())\t\n\t\t\timg = img.addBands(nightLights.select([\"stable_lights\"]).rename([\"stable_lights\"]))\n\t\t\n\t\tif y >= 2012:\n\t\t\tnightLights = ee.Image(ee.ImageCollection(\"NOAA/VIIRS/DNB/MONTHLY_V1/VCMCFG\").filterDate(startDate,endDate).mean())\t\n\t\t\timg = img.addBands(nightLights.select([\"avg_rad\"]).rename([\"stable_lights\"]))\n\t\t\n\t\treturn img", "def set_light(self, light, num=0):\r\n #TODO (pg) need MAXLIGHTS global variable, room for two now but shader\r\n # 
only uses 1.\r\n if num > 1 or num < 0:\r\n num = 0\r\n stn = 24 + num * 9\r\n self.unif[stn:(stn + 3)] = light.lightpos[0:3]\r\n self.unif[(stn + 3):(stn + 6)] = light.lightcol[0:3]\r\n self.unif[(stn + 6):(stn + 9)] = light.lightamb[0:3]", "def __str__(self):\n return \"HS2Lights(%d, %d, %d)\" % (self.lightLeft, self.lightCenter, self.lightRight)", "def InitLightBasic(self):\r\n\t\t\r\n\t\taLight = AmbientLight(\"AmbientLight\")\r\n\t\taLight.setColor(Vec4(0.3, 0.3, 0.3, 1))\r\n\t\trender.setLight(render.attachNewNode(aLight))\r\n\t\r\n\t\tdLight1 = DirectionalLight(\"DirectionalLight1\")\r\n\t\tdLight1.setColor(Vec4(0.65, 0.6, 0.6, 1))\t\t\r\n\t\tdLight1NP = render.attachNewNode(dLight1)\r\n\t\tdLight1NP.setHpr(100, -40, 0)\r\n\t\trender.setLight(dLight1NP)\r\n\t\r\n\t\tdLight2 = DirectionalLight(\"DirectionalLight2\")\r\n\t\tdLight2.setColor(Vec4(0.35, 0.35, 0.3, 1))\r\n\t\tdLight2NP = render.attachNewNode(dLight2)\r\n\t\tdLight2NP.setHpr(150, -60, 0)\r\n\t\trender.setLight(dLight2NP)", "def __init__(self):\n GPIO.setmode(GPIO.BOARD)\n for light in self.all:\n GPIO.setup(light, GPIO.OUT)", "def add_mesh_to_scene(sdk, scene, mesh, contentid):\n global n\n name = contentid+\"_\"+str(n)\n n+=1\n # Todo: pass scene instead?\n fbx_mesh = FbxMesh.Create(sdk, name)\n fbx_mesh.CreateLayer()\n layer0 = fbx_mesh.GetLayer(0)\n\n # Verts\n\n fbx_mesh.InitControlPoints(len(mesh.v))\n if RELOCATE_BRUSHES is True:\n print mesh.v\n #MM TRANSLATE BRUSHES\n filler=(0,0,0)\n newmeshv=[]\n for i, v in enumerate(mesh.v):\n if i==0:\n reference=v\n newmeshv.append(filler)\n else:\n newmeshv.append(tuple(numpy.subtract(v,reference)))\n print newmeshv\n mesh.v=newmeshv\n \n for i, v in enumerate(mesh.v):\n fbx_mesh.SetControlPointAt(as_fvec4(v, scale=100), i)\n\n layer_elt = create_fbx_layer(\n fbx_mesh, mesh.n, as_fvec4, FbxLayerElementNormal)\n if layer_elt is not None:\n layer0.SetNormals(layer_elt)\n\n layer_elt = create_fbx_layer(\n fbx_mesh, mesh.c, as_fcolor, FbxLayerElementVertexColor,\n allow_index = True,\n allow_allsame = True)\n if layer_elt is not None:\n layer0.SetVertexColors(layer_elt)\n\n # Tilt Brush may have 3- or 4-element UV channels, and may have multiple\n # UV channels. 
This only handles the standard case of 2-component UVs\n layer_elt = create_fbx_layer(\n fbx_mesh, mesh.uv0, as_fvec2, FbxLayerElementUV,\n allow_index = True)\n if layer_elt is not None:\n layer0.SetUVs(layer_elt, FbxLayerElement.eTextureDiffuse)\n pass\n\n layer_elt = create_fbx_layer(\n fbx_mesh, mesh.t, as_fvec4, FbxLayerElementTangent,\n allow_index = True)\n if layer_elt is not None:\n layer0.SetTangents(layer_elt)\n\n # Unity's FBX import requires Binormals to be present in order to import the\n # tangents but doesn't actually use them, so we just output some dummy data.\n layer_elt = create_fbx_layer(\n fbx_mesh, ((0, 0, 0, 0),), as_fvec4, FbxLayerElementBinormal,\n allow_allsame = True)\n if layer_elt is not None:\n layer0.SetBinormals(layer_elt)\n\n layer_elt = create_fbx_layer(\n fbx_mesh, (), lambda x: x, FbxLayerElementMaterial, allow_allsame = True)\n if layer_elt is not None:\n layer0.SetMaterials(layer_elt)\n\n # Polygons\n\n for triplet in mesh.tri:\n fbx_mesh.BeginPolygon(-1, -1, False)\n fbx_mesh.AddPolygon(triplet[0])\n fbx_mesh.AddPolygon(triplet[1])\n fbx_mesh.AddPolygon(triplet[2])\n fbx_mesh.EndPolygon()\n\n material = FbxSurfaceLambert.Create(sdk, mesh.brush_name)\n name=mesh.brush_name+\"_\"+str(mesh.c[0])+\"_\"+name\n \n if EXPORT_BRUSH_AREA is True:\n ps=[]\n for t in mesh.v:\n ps.append(list(t))\n #ps2=[]\n #for t in mesh.t:\n # ps2.append(list(t[0:3])) \n # print len(mesh.tri)\n #print len(mesh.v)\n #print ps\n print name+\",\"+str(poly_area(ps))\n #print poly_area(ps2)\n #poly = [[0, 3, 1], [0, 2, 3], [2, 5, 3], [2, 4, 5], [4, 7, 5], [4, 6, 7], [6, 9, 7], [6, 8, 9], [8, 11, 9], [8, 10, 11], [10, 13, 11], [10, 12, 13], [12, 15, 13], [12, 14, 15]]\n #print poly_area(poly) \n global polyareadata\n polyareadata.append(name+\",\"+str(poly_area(ps)))\n \n print name\n mm_save_mesh_metadata(name,mesh)\n #print mesh.brush_name #Roughly analagous to a material\n #print mesh.brush_guid\n #print mesh.v #list of positions (3-tuples)\n #print mesh.n #list of normals (3-tuples, or None if missing)\n #print mesh.uv0 #list of uv0 (2-, 3-, 4-tuples, or None if missing)\n #print mesh.uv1 #see uv0\n #print mesh.c #list of colors, as a uint32. abgr little-endian, rgba big-endian\n #print mesh.t #list of tangents (4-tuples, or None if missing)\n #print mesh.tri #list of triangles (3-tuples of ints)\n \n # Node tree\n\n root = scene.GetRootNode()\n node = FbxNode.Create(sdk, name)\n node.SetNodeAttribute(fbx_mesh)\n node.AddMaterial(material)\n node.SetShadingMode(FbxNode.eTextureShading) # Hmm\n root.AddChild(node)", "def dark(s='dark'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n print camera.SetShutter(2)\n camera.status.imgtype = 'DARK'\n camera.status.object = s\n camera.status.update()", "def get_lights(bridge):\n\n target_names = [\n \"Console Lamp\",\n \"Bedroom Table Lamp\",\n \"Kitchen light\",\n ]\n\n targets = [light for light in bridge.lights if light.name in target_names]\n\n if len(targets) != len(target_names):\n print(\"%s: not found ... %s\" % (target_names, targets))\n exit(1)\n\n return targets", "def set_light_on(self):\r\n self._light = \"ON\"" ]
[ "0.70548475", "0.6534689", "0.6220881", "0.6124191", "0.6103418", "0.5953071", "0.59508586", "0.592486", "0.5756125", "0.5698417", "0.5659417", "0.56495863", "0.55890954", "0.55873734", "0.5572616", "0.55421257", "0.54985136", "0.54800266", "0.54774386", "0.54722345", "0.54558796", "0.54512435", "0.5450689", "0.5436893", "0.5420839", "0.53971976", "0.5364416", "0.5355828", "0.5346665", "0.5330729", "0.5324171", "0.53156847", "0.53056145", "0.52885157", "0.5264599", "0.5258479", "0.5247654", "0.5242311", "0.5235987", "0.52254874", "0.5224041", "0.52196115", "0.521918", "0.5217763", "0.52144337", "0.5214424", "0.52019525", "0.51975584", "0.5183859", "0.518208", "0.51614773", "0.51584935", "0.5154576", "0.5154386", "0.51496035", "0.5148177", "0.51428187", "0.51322967", "0.5127613", "0.512272", "0.5122519", "0.5115751", "0.51132035", "0.5113145", "0.5105948", "0.5105235", "0.5093624", "0.50897676", "0.50890493", "0.50777626", "0.5075983", "0.5075467", "0.5075059", "0.5075059", "0.50727016", "0.5066547", "0.5054114", "0.5053813", "0.50499254", "0.50494146", "0.50197434", "0.5016316", "0.49936184", "0.49893287", "0.49872646", "0.49865186", "0.49832317", "0.4982736", "0.49763098", "0.49732453", "0.49721894", "0.49704945", "0.4956391", "0.49545977", "0.4944432", "0.49442267", "0.49394944", "0.49384534", "0.4930334", "0.4925913" ]
0.7741908
0
export all the lightlinking in the scene
def exportLightLinking(self): lights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a] allShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))] litLinks = {} for l in lights: lightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0) litLinks[l] = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT pickle.dump( litLinks, open( self.lightLinkPath.path, "wb" ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exportLights(self):\n\t\t#TODO! REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\t\tif len(finalLi) == 1:\n\t\t\t\tlitsToExport.append( finalLi[0] )\n\t\t\telse:\n\t\t\t\tlitsToExport.append( finalLi[1] )\n\t\tif litsToExport:\n\t\t\tmc.select( litsToExport, r=1, ne=1 )\n\t\t\tmc.file( self.lightPath.path, op=\"v=0\", typ=\"mayaAscii\", pr=1, es=1 )\n\t\t\t#export Light Linking\n\t\t\tself.exportLightLinking()", "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, \"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, 
samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def export_blend_connections():\n selection_list = pm.ls(tr=1, sl=1, l=1)\n\n dialog_return = pm.fileDialog2(cap=\"Save As\", fm=0, ff='Text Files(*.txt)')\n\n filename = dialog_return[0]\n print(filename)\n\n print(\"\\n\\nFiles written:\\n--------------------------------------------\\n\")\n\n with open(filename, 'w') as fileId:\n for i in range(0, len(selection_list)):\n shapes = pm.listRelatives(selection_list[i], s=True, f=True)\n\n main_shape = \"\"\n for j in range(0, len(shapes)):\n if pm.getAttr(shapes[j] + '.intermediateObject') == 0:\n main_shape = shapes\n break\n if main_shape == \"\":\n main_shape = shapes[0]\n\n con = pm.listConnections(main_shape, t=\"blendShape\", c=1, s=1, p=1)\n\n cmd = \"connectAttr -f %s.worldMesh[0] %s;\" % (\n ''.join(map(str, main_shape)),\n ''.join(map(str, con[0].name()))\n )\n print (cmd + \"\\n\")\n fileId.write(\"%s\\n\" % cmd)\n\n print(\"\\n------------------------------------------------------\\n\")\n print(\"filename: %s ...done\\n\" % filename)", "def render(self):\r\n \r\n # --------------------------------\r\n # Set world-level Panda properties\r\n # --------------------------------\r\n\r\n # Create Ambient Light 1\r\n ambientLight = AmbientLight( 'ambientLight_1' )\r\n ambientLight.setColor( Vec4( 0.2, 0.2, 0.2, 1 ) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, 50, 50)\r\n render.setLight(ambientLightNP)\r\n\r\n # Create Ambient Light 2\r\n ambientLight = AmbientLight( 'ambientLight_2' )\r\n ambientLight.setColor( Vec4(0.2, 0.2, 0.2, 1) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, -50, 50)\r\n render.setLight(ambientLightNP)\r\n# \r\n# # Directional light 01\r\n# directionalLight = DirectionalLight( 
\"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.8, 0.2, 0.2, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing backwards, towards the camera.\r\n# directionalLightNP.setHpr(180, 20, 0)\r\n# render.setLight(directionalLightNP)\r\n#\r\n# # Directional light 02\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.2, 0.2, 0.8, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing forwards, away from the camera.\r\n# directionalLightNP.setHpr(0, -20, 0)\r\n# render.setLight(directionalLightNP)\r\n\r\n #create a directional light\r\n #light = DirectionalLight('my dlight')\r\n\r\n #create a point light\r\n light = PointLight('plight')\r\n #light.setColor(VBase4(0.2, 0.2, 0.2, 1))\r\n\r\n #The following line doesn't work in Panda3D 1.7.0\r\n #lightPath = render.attachNewNode(light.upcastToPandaNode())\r\n\r\n lightPath = render.attachNewNode(light)\r\n lightPath.setPos( 10, 10, 10)\r\n\r\n #lightPath.lookAt(objPath)\r\n\r\n #illuminate all\r\n render.setLight(lightPath)\r\n #illuminate only objPath objects\r\n #objPath.setLight(lightPath)\r\n\r\n #self.SetMouseControls(objPath)\r\n #self.setKeyboardControls()\r\n \r\n taskMgr.add(self.mouseControlsTask, 'mouseControlsTask')\r\n #taskMgr.add(self.cameraMovementTask, 'cameraMovementTask') \r\n\r\n base.setBackgroundColor( .0, .0, .0 )\r\n\r\n #taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")\r\n #core.cmd.exeCommand(\"LoadEdge\", obj, file_name+self.WingedEdgeExtensions[0], file_name+self.WingedEdgeExtensions[1], file_name+self.WingedEdgeExtensions[2], file_name+self.WingedEdgeExtensions[3])\r\n #self.model = importer.loadFile(fileName)\r\n #if self.model is None:\r\n # print \"Unsupported file\"\r\n # return\r", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = 
\"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, 
nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def testLighExport(self):\n\n archive = OArchive(\"light1.abc\")\n emptyLightObj = OLight(archive.getTop(), \"emptyLight\")\n lightObj = OLight(archive.getTop(), \"myLight\" )\n\n samp = CameraSample()\n lightObj.getSchema().setCameraSample( samp )\n\n samp = CameraSample( -0.35, 0.75, 0.1, 0.5 )\n 
lightObj.getSchema().getChildBoundsProperty().setValue(\n Box3d( V3d( 0.0, 0.1, 0.2 ), V3d( 0.3, 0.4, 0.5 ) ) )\n\n lightObj.getSchema().setCameraSample( samp )\n\n arg = lightObj.getSchema().getArbGeomParams()\n param = OFloatGeomParam( arg, \"test\", False, kConstantScope, 1 )\n user = lightObj.getSchema().getUserProperties()\n OFloatProperty( user, \"test\" )", "def importLightLinking(self, asset = '', searchAndReplace = ['',''] ):\n\t\tLayersInfo = pickle.load( open( self.lightLinkPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tif not asset == '':\n\t\t\tLayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )\n\t\tfor l in LayersInfo.keys():\n\t\t\tobjsToBreakLink = []\n\t\t\tfor link in LayersInfo[l]:\n\t\t\t\tif mc.objExists( link ):\n\t\t\t\t\tobjsToBreakLink.append( link )\n\t\t\tmc.lightlink( b = True, light = l, o = objsToBreakLink )\n\t\tmc.refresh( su = 0 )", "def _on_lowres_assets(self):\n\n scene_assets = artellapipe.AssetsMgr().get_scene_assets()\n if not scene_assets:\n return\n\n for scene_asset in scene_assets:\n scene_asset.switch_to_proxy()", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def writeObject(self,view,renderer):\n\n if not view.Source:\n return \"\"\n\n # point light hook\n proxy = getattr(view.Source,\"Proxy\",None)\n if getattr(proxy,\"type\",None) == \"PointLight\":\n return self.writePointLight(view,renderer)\n\n # get color and alpha\n mat = None\n color = None\n alpha = None\n if view.Material:\n mat = view.Material\n else:\n if \"Material\" in view.Source.PropertiesList:\n if view.Source.Material:\n mat = view.Source.Material\n if mat:\n if \"Material\" in mat.PropertiesList:\n if \"DiffuseColor\" in mat.Material:\n color = mat.Material[\"DiffuseColor\"].strip(\"(\").strip(\")\").split(\",\")[:3]\n if \"Transparency\" in mat.Material:\n if float(mat.Material[\"Transparency\"]) > 0:\n alpha = 1.0 - float(mat.Material[\"Transparency\"])\n else:\n alpha = 1.0\n\n if view.Source.ViewObject:\n if not color:\n if hasattr(view.Source.ViewObject,\"ShapeColor\"):\n color = view.Source.ViewObject.ShapeColor[:3]\n if not alpha:\n if hasattr(view.Source.ViewObject,\"Transparency\"):\n if view.Source.ViewObject.Transparency > 0:\n alpha = 1.0-(float(view.Source.ViewObject.Transparency)/100.0)\n if not color:\n color = (1.0, 1.0, 1.0)\n if not alpha:\n alpha = 1.0\n\n # get mesh\n mesh = None\n if hasattr(view.Source,\"Group\"):\n shps = [o.Shape for o in Draft.getGroupContents(view.Source) if hasattr(o,\"Shape\")]\n mesh = MeshPart.meshFromShape(Shape=Part.makeCompound(shps),\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Part::Feature\"):\n mesh = MeshPart.meshFromShape(Shape=view.Source.Shape,\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Mesh::Feature\"):\n mesh = view.Source.Mesh\n if not mesh:\n return \"\"\n\n return renderer.writeObject(view,mesh,color,alpha)", "def read_layout(outFile=None, linked=False, append=False):\n from cgl.plugins.blender.lumbermill import scene_object, LumberObject, import_file\n from cgl.core.utils.read_write import load_json\n import bpy\n\n if outFile == None:\n outFileObject = scene_object().copy(ext='json', task='lay', user='publish').latest_version()\n outFileObject.set_attr(filename='%s_%s_%s.%s' % (outFileObject.seq,\n outFileObject.shot,\n outFileObject.task,\n 'json'\n ))\n outFile = outFileObject.path_root\n # outFile = 
scene_object().path_root.replace(scene_object().ext, 'json')\n\n\n\n data = load_json(outFile)\n\n for p in data:\n print(p)\n data_path = data[p]['source_path']\n blender_transform = data[p]['blender_transform']\n\n transform_data = []\n for value in blender_transform:\n transform_data.append(value)\n\n print(transform_data)\n\n pathToFile = os.path.join(scene_object().root, data_path)\n lumberObject = LumberObject(pathToFile)\n\n\n\n if lumberObject.filename in bpy.data.libraries:\n lib = bpy.data.libraries[lumberObject.filename]\n bpy.data.batch_remove(ids=([lib]))\n import_file(lumberObject.path_root, linked=linked, append=append)\n else:\n import_file(lumberObject.path_root, linked=linked, append=append)\n\n if p not in bpy.context.collection.objects:\n obj = bpy.data.objects.new(p, None)\n bpy.context.collection.objects.link(obj)\n obj.instance_type = 'COLLECTION'\n obj.instance_collection = bpy.data.collections[lumberObject.asset]\n obj.location = (transform_data[0], transform_data[1], transform_data[2])\n obj.rotation_euler = (transform_data[3], transform_data[4], transform_data[5])\n obj.scale = (transform_data[6], transform_data[7], transform_data[8])\n\n bpy.ops.file.make_paths_relative()", "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)", "def link_residues(self) -> None:\n ...", "def lightlink(*args, b: bool=True, hierarchy: bool=True, light: Union[name, List[name]]=None,\n make: bool=True, object: Union[name, List[name]]=None, sets: bool=True, shadow:\n bool=True, shapes: bool=True, transforms: bool=True, useActiveLights: bool=True,\n useActiveObjects: bool=True, q=True, query=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def render_sample(latents, material_names, include_lights, output_filename, save_scene):\n\n # set output path\n bpy.context.scene.render.filepath = output_filename\n\n # set objects and lights\n update_objects_and_lights(latents, material_names, include_lights)\n\n rgba_background = colorsys.hsv_to_rgb(latents[9] / (2.0 * np.pi), 0.60, 1.0) + (\n 1.0,\n )\n render_utils.change_material(\n bpy.data.objects[\"Ground\"].data.materials[-1], Color=rgba_background\n )\n\n # set scene background\n bpy.ops.render.render(write_still=True)\n\n if save_scene:\n # just for debugging\n bpy.ops.wm.save_as_mainfile(\n filepath=f\"scene_{os.path.basename(output_filename)}.blend\"\n )", "def targets(self):\n self.renderer.begin_rendering(\"targets\")\n for target in self.targets:\n self.renderer.draw_rect_3d(target, 10, 10, True, self.renderer.blue())\n self.renderer.end_rendering()", "def exportData(self):\n\t\tlays = rlayer.renderlayers()\n\t\tdata = {}\n\t\tfor l in lays:\n\t\t\tif l.name == 'defaultRenderLayer':\n\t\t\t\tcontinue\n\t\t\tdata[l.name] = {'objects':l.objects, # OBJECTS IN LAYER\n\t\t\t\t\t\t\t'values' :l.overridesWithValues, # OVERRIDED ATTRIBUTES ONLY CHANGED VALUES\n\t\t\t\t\t\t\t'conns' 
:l.overridesWithConnections[0], # OVERRIDED ATTRIBUTES CHANGED CONNECTIONS\n\t\t\t\t\t\t\t'shader' :l.overridedShader # OVERRIDE RENDERLAYER SHADER\n\t\t\t\t\t\t\t}\n\t\tpickle.dump( data, open( self.dataPath.path, \"wb\" ) )", "def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )", "def flicker_lights(self):\n print 'Lights Set'", "def viewAll(self):\n self._sceneviewer.viewAll()", "def load_morph_links():\n dtu_path = os.path.abspath(Definitions.EXPORT_DIR + \"\\FIG\\FIG0\")\n dtu_loader = DtuLoader.DtuLoader(dtu_path)\n morph_links = dtu_loader.get_morph_links_dict()\n return morph_links", "def lights(self):\n return list(self.GetLights())", "def batch_export_ortho():\r\n global path_to_project\r\n \r\n for path in path_to_project:\r\n export_filename = os.path.basename(path['ProjectPath']).replace('.psz','.tif')\r\n export_path = os.path.join(export_folder,export_filename)\r\n try:\r\n project = PhotoScan.app.document\r\n project.open(path['ProjectPath'])\r\n \r\n dx, dy = mosaic.get_resolution(path['Flight_id'], path['Field'], path['Camera'])\r\n \r\n if dx is not None and dy is not None:\r\n status = project.activeChunk.exportOrthophoto(\r\n export_path, format=\"tif\", color_correction=False, blending='average', dx=dx, dy=dy,\r\n projection=project.activeChunk.projection)\r\n else:\r\n status = project.activeChunk.exportOrthophoto(export_path, format=\"tif\", color_correction=False, blending='average',projection=project.activeChunk.projection)\r\n except Exception as e:\r\n print(e)\r\n if status is True:\r\n print(\"Perfect\")\r\n app = PhotoScan.Application()\r\n app.quit()", "def export( self, captionMode, copyFiles, outputDir ):\n scene = slicer.mrmlScene\n nodes = scene.GetNumberOfNodes()\n\n self.__nodes = {}\n\n # 1 for model name, 2 for parent name\n self.__captionMode = captionMode\n # TRUE if we shall copy the files to the outputDir\n self.__copyFiles = copyFiles\n self.__outputDir = outputDir\n\n self.__tree = Tree()\n self.__tree.create_node( \"Scene\", \"scene\" )\n\n for n in xrange( nodes ):\n\n node = scene.GetNthNode( n )\n\n self.parseNode( node )\n\n [header, footer] = self.configureXrenderers()\n output = header\n output += self.createXtree( \"scene\" )\n output += footer\n\n return output", "def lightPath(self):\n\t\treturn mfl.mayaFile( self._path + '/lights.ma' )", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = 
r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def _add_links_from_mergers(self):\n for i, node_name in enumerate(self.node_list):\n self.builder.addDirectedLink(node_name, self, islot=i)", "def export(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net') #+ name)\n export_path = os.path.join(self.configuration['export_path'], 'exported_net_{}.pth'.format(name))\n batch_fixed = self.input[:,1,:,:,:]\n batch_moving = self.input[:,2,:,:,:]\n traced_script_module = torch.jit.trace(net, (batch_moving, batch_fixed))\n traced_script_module.save(export_path)", "def render(self):\n # Remove existing fresnel geometries from the scene\n for geometry in self._geometries:\n geometry.remove()\n\n # Clear the list of fresnel geometries\n self._geometries = []\n\n # Add fresnel scene geometries from plato scene primitives\n for prim in self._primitives:\n geometry = prim.render(self._fresnel_scene)\n self._geometries.append(geometry)\n\n # Set up the camera\n camera_up = rowan.rotate(rowan.conjugate(self.rotation), [0, 1, 0])\n camera_position = rowan.rotate(rowan.conjugate(self.rotation), -self.translation)\n camera_look_at = camera_position + rowan.rotate(rowan.conjugate(self.rotation), [0, 0, -1])\n camera_height = self.size[1]/self.zoom\n try:\n orthographic_camera = fresnel.camera.Orthographic\n except AttributeError:\n # Support fresnel < 0.13.0\n orthographic_camera = fresnel.camera.orthographic\n self._fresnel_scene.camera = orthographic_camera(\n position=camera_position,\n look_at=camera_look_at,\n up=camera_up,\n height=camera_height)\n\n # Set up lights\n lights = []\n if 'ambient_light' in self.enabled_features:\n config = self.get_feature_config('ambient_light')\n magnitude = config.get('value', 0.25)\n if magnitude > 0:\n lights.append(fresnel.light.Light(direction=(0, 0, 1),\n color=(magnitude, magnitude, magnitude),\n theta=np.pi))\n if 'directional_light' in self.enabled_features:\n config = self.get_feature_config('directional_light')\n directions = config.get('value', (.25, .5, -1))\n directions = np.atleast_2d(directions).astype(np.float32)\n for direction in directions:\n magnitude = np.linalg.norm(direction)\n if magnitude > 0:\n lights.append(fresnel.light.Light(direction=-direction,\n color=(magnitude, magnitude, magnitude),\n theta=0.7))\n if len(lights) > 0:\n self._fresnel_scene.lights = lights\n\n # Set up tracer\n if 'pathtracer' in self.enabled_features:\n # Use path tracer if enabled\n config = self.get_feature_config('pathtracer')\n tracer = self._path_tracer\n 
samples = config.get('samples', 64)\n def render_function(scene, **kwargs):\n return tracer.sample(scene, samples, **kwargs)\n else:\n # Use preview tracer by default\n tracer = self._preview_tracer\n tracer.anti_alias = 'antialiasing' in self.enabled_features\n render_function = tracer.render\n\n self._output = render_function(self._fresnel_scene)", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def exports():", "def populateSceneRefs(*args):\n pi.referenceDictionary = {}\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, ra=True)\n\n #get reference paths\n refs = cmds.file(q=True, r=True)\n\n buff = []\n # loaded = []\n for ref in refs:\n #get the associated namespace\n ns = cmds.file(ref, q=True, ns=True)\n pi.referenceDictionary[ns] = ref\n\n # put files in buffer list to sort\n for g in pi.referenceDictionary.keys():\n buff.append(g)\n buff.sort()\n\n # now put the sorted namespaces in the list\n for b in buff:\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, append=b, dcc = selectRefs)\n\n # if ref is deferred(not loaded), change it's font\n for ref in refs:\n if cmds.file(ref, q=True, deferReference=True):\n ns = cmds.file(ref, q=True, ns=True) # get the namespace in order to get the item name\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, selectItem=ns) # sel the item in order to query it\n index = cmds.textScrollList(widgets[\"shotAssListTSL\"], q=True, selectIndexedItem=True)[0] # query the index of sel\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, lineFont = [index, \"obliqueLabelFont\"])\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, deselectAll=True)\n\n # if we're in a lgt file, look through current refs and for each one of type \"anm\", check the frame rates, etc. and give option to change\n curr = paths.PathManager(cmds.file(q=True, sn=True))\n if curr.shotType == \"lgt\":\n for ref in refs:\n p=paths.PathManager(ref)\n if p.shotType == \"anm\":\n dict = cFuncs.getFileFrameInfo(cFuncs.fixPath(ref))\n csi.compareSceneInfo(dict)", "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def export_rainbows(rainbows, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n pov_mat_name = \"Default_texture\"\n for ob in rainbows:\n povdataname = ob.data.name # enough? 
XXX not used nor matrix fn?\n angle = degrees(ob.data.spot_size / 2.5) # radians in blender (2\n width = ob.data.spot_blend * 10\n distance = ob.data.shadow_buffer_clip_start\n # eps=0.0000001\n # angle = br/(cr+eps) * 10 #eps is small epsilon variable to avoid dividing by zero\n # width = ob.dimensions[2] #now let's say width of rainbow is the actual proxy height\n # formerly:\n # cz-bz # let's say width of the rainbow is height of the cone (interfacing choice\n\n # v(A,B) rotates vector A about origin by vector B.\n # and avoid a 0 length vector by adding 1\n\n # file.write(\"#declare %s_Target= vrotate(<%.6g,%.6g,%.6g>,<%.4g,%.4g,%.4g>);\\n\" % \\\n # (povdataname, -(ob.location.x+0.1), -(ob.location.y+0.1), -(ob.location.z+0.1),\n # ob.rotation_euler.x, ob.rotation_euler.y, ob.rotation_euler.z))\n\n direction = ( # XXX currently not used (replaced by track to?)\n ob.location.x,\n ob.location.y,\n ob.location.z,\n ) # not taking matrix into account\n rmatrix = global_matrix @ ob.matrix_world\n\n # ob.rotation_euler.to_matrix().to_4x4() * mathutils.Vector((0,0,1))\n # XXX Is result of the below offset by 90 degrees?\n up = ob.matrix_world.to_3x3()[1].xyz # * global_matrix\n\n # XXX TO CHANGE:\n # formerly:\n # tab_write(file, \"#declare %s = rainbow {\\n\"%povdataname)\n\n # clumsy for now but remove the rainbow from instancing\n # system because not an object. use lamps later instead of meshes\n\n # del data_ref[dataname]\n tab_write(file, \"rainbow {\\n\")\n\n tab_write(file, \"angle %.4f\\n\" % angle)\n tab_write(file, \"width %.4f\\n\" % width)\n tab_write(file, \"distance %.4f\\n\" % distance)\n tab_write(file, \"arc_angle %.4f\\n\" % ob.pov.arc_angle)\n tab_write(file, \"falloff_angle %.4f\\n\" % ob.pov.falloff_angle)\n tab_write(file, \"direction <%.4f,%.4f,%.4f>\\n\" % rmatrix.translation[:])\n tab_write(file, \"up <%.4f,%.4f,%.4f>\\n\" % (up[0], up[1], up[2]))\n tab_write(file, \"color_map {\\n\")\n tab_write(file, \"[0.000 color srgbt<1.0, 0.5, 1.0, 1.0>]\\n\")\n tab_write(file, \"[0.130 color srgbt<0.5, 0.5, 1.0, 0.9>]\\n\")\n tab_write(file, \"[0.298 color srgbt<0.2, 0.2, 1.0, 0.7>]\\n\")\n tab_write(file, \"[0.412 color srgbt<0.2, 1.0, 1.0, 0.4>]\\n\")\n tab_write(file, \"[0.526 color srgbt<0.2, 1.0, 0.2, 0.4>]\\n\")\n tab_write(file, \"[0.640 color srgbt<1.0, 1.0, 0.2, 0.4>]\\n\")\n tab_write(file, \"[0.754 color srgbt<1.0, 0.5, 0.2, 0.6>]\\n\")\n tab_write(file, \"[0.900 color srgbt<1.0, 0.2, 0.2, 0.7>]\\n\")\n tab_write(file, \"[1.000 color srgbt<1.0, 0.2, 0.2, 1.0>]\\n\")\n tab_write(file, \"}\\n\")\n\n # tab_write(file, \"texture {%s}\\n\"%pov_mat_name)\n write_object_modifiers(ob, file)\n # tab_write(file, \"rotate x*90\\n\")\n # matrix = global_matrix @ ob.matrix_world\n # write_matrix(file, matrix)\n tab_write(file, \"}\\n\")\n # continue #Don't render proxy mesh, skip to next object", "def enableLighting(self):\r\n\t\t\r\n\t\tglEnable(GL_LIGHTING)", "async def collect_outlinks(self, all_frames: bool = False) -> None:", "def collectLinks(self, output):\n pass", "def lights(self) -> List[dict]:\n return self.items_by_domain(\"light\")", "def gl_lighting():\n for viewer in nuke.allNodes('Viewer'):\n val = int(viewer.knob('gl_lighting').getValue())\n viewer.knob('gl_lighting').setValue(not val)", "def get_light_list(self):\n return self.light_array", "def create_scene(self):\n\n c = config.Colors.background\n gr3.setbackgroundcolor(c[0], c[1], c[2], 1.0)\n gr3.clear()\n\n if self.results is None:\n return\n\n show_domains = self.settings.show_domains\n 
show_surface_cavities = self.settings.show_surface_cavities\n show_center_cavities = self.settings.show_center_cavities\n if show_center_cavities and self.results.center_cavities is not None:\n show_surface_cavities = False\n elif show_surface_cavities and self.results.surface_cavities is not None:\n show_domains = False\n\n self.objectids = [None]\n edges = self.results.atoms.volume.edges\n num_edges = len(edges)\n edge_positions = [edge[0] for edge in edges]\n edge_directions = [[edge[1][i]-edge[0][i] for i in range(3)] for edge in edges]\n edge_lengths = [sum([c*c for c in edge])**0.5 for edge in edge_directions]\n edge_radius = min(edge_lengths)/200\n if self.settings.show_bounding_box:\n gr3.drawcylindermesh(num_edges, edge_positions, edge_directions,\n [config.Colors.bounding_box]*num_edges,\n [edge_radius]*num_edges, edge_lengths)\n corners = list(set([tuple(edge[0]) for edge in edges] + [tuple(edge[1]) for edge in edges]))\n num_corners = len(corners)\n gr3.drawspheremesh(num_corners, corners,\n [config.Colors.bounding_box]*num_corners,\n [edge_radius]*num_corners)\n\n if self.settings.show_atoms and self.results.atoms is not None:\n visible_atom_indices = self.settings.visible_atom_indices\n if visible_atom_indices is not None:\n visible_atom_indices = [comp for comp in visible_atom_indices if 0 <= comp < self.results.atoms.number]\n else:\n visible_atom_indices = range(self.results.atoms.number)\n if len(visible_atom_indices) == 0:\n visible_atom_indices = None\n if visible_atom_indices is not None:\n visible_atom_indices = np.array(visible_atom_indices)\n gr3.drawspheremesh(len(visible_atom_indices),\n self.results.atoms.positions[visible_atom_indices],\n self.results.atoms.colors[visible_atom_indices],\n np.ones(len(visible_atom_indices))*config.OpenGL.atom_radius)\n if self.settings.show_bonds:\n bonds = self.results.atoms.bonds\n for start_index, target_indices in enumerate(bonds):\n if start_index not in visible_atom_indices:\n continue\n target_indices = np.array([i for i in target_indices if i in visible_atom_indices])\n if len(target_indices) == 0:\n continue\n start_position = self.results.atoms.positions[start_index]\n target_positions = self.results.atoms.positions[target_indices]\n directions = target_positions - start_position\n bond_lengths = la.norm(directions, axis=1)\n directions /= bond_lengths.reshape(len(directions), 1)\n gr3.drawcylindermesh(len(target_indices),\n target_positions,\n -directions,\n [config.Colors.bonds] * self.results.atoms.number,\n np.ones(bond_lengths.shape)*config.OpenGL.bond_radius,\n bond_lengths)\n\n if self.results is None:\n return\n if show_domains and self.results.domains is not None:\n self.draw_cavities(self.results.domains,\n config.Colors.domain, 'domain',\n self.settings.visible_domain_indices)\n if show_surface_cavities and self.results.surface_cavities is not None:\n self.draw_cavities(self.results.surface_cavities,\n config.Colors.surface_cavity, 'surface cavity',\n self.settings.visible_surface_cavity_indices)\n if show_center_cavities and self.results.center_cavities is not None:\n self.draw_cavities(self.results.center_cavities,\n config.Colors.center_cavity, 'center cavity',\n self.settings.visible_center_cavity_indices)", "def roads_all(osm_path): \n return (retrieve(osm_path,'lines',['highway'])).rename(columns={'highway': 'asset'})", "def _on_hires_assets(self):\n\n scene_assets = artellapipe.AssetsMgr().get_scene_assets()\n if not scene_assets:\n return\n\n for scene_asset in scene_assets:\n 
scene_asset.switch_to_hires()", "def compute_relations(self):\n\n visible_nodes = {}\n\n self.cameras = self.get_all_cameras()\n rospy.logdebug(self.cameras)\n\n if self.cameras.items():\n try:\n if self.visibility_monitor is None:\n self.visibility_monitor = VisibilityMonitor(self.ctx, self.source)\n rospy.loginfo(\"[perspective_filter] Visibility monitor now running, please active the Pygame windows.\")\n visible_nodes = self.visibility_monitor.compute_all()\n rospy.logdebug(\"[perspective_filter] %d perspectives computed \" % len(visible_nodes))\n #rospy.logdebug(visible_nodes)\n except Exception as e:\n rospy.logwarn(\"[perspective_filter] Exception occurred while computing relation : %s\" % str(e))\n if self.visibility_monitor:\n self.visible_nodes = {} #visible_nodes\n for camera_name, visibles_obj in visible_nodes.items():\n camera_id = self.source.scene.nodebyname(camera_name)[0].id\n self.visible_nodes[camera_id] = visibles_obj\n for node in visibles_obj:\n if node.parent in self.cameras.keys():\n if self.source.scene.nodes[node.parent] not in visibles_obj:\n visibles_obj.append(self.source.scene.nodes[node.parent])\n\n for agent_id, nodes_seen in self.visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_seen:\n if agent_id in self.previously_visible_nodes:\n if node not in self.previously_visible_nodes[agent_id]:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n for agent_id, nodes_previously_seen in self.previously_visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_previously_seen:\n if agent_id in self.visible_nodes:\n if node not in self.visible_nodes[agent_id]:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n self.publish_perspectives()\n self.previously_visible_nodes = self.visible_nodes", "async def lights(self, context):\n\n await random_image(context, 'lights')", "def importLights(self, asset = '', searchAndReplace = ['',''] ):\n\t\tif self.lightPath.exists:\n\t\t\tself.lightPath.imp()\n\t\t\tif self.lightLinkPath.exists:\n\t\t\t\tself.importLightLinking( asset, searchAndReplace )", "def pov_render(self, camera_position = (0,0,-10), camera_target = (0,0,0)):\n\n \"\"\"\n f=pov.File(\"demo.pov\",\"colors.inc\",\"stones.inc\")\n \n cam = pov.Camera(location=camera_position, sky=(1,0,1),look_at=camera_target)\n light = pov.LightSource( camera_position, color=\"White\")\n \n povObjs = [cam, light]\n for obj in self.objects[1:]:\n # test coordinate transfroms\n # print M\n # vectors = np.array([[0,0,0,1], #origin\n # [1,0,0,1], # x\n # [0,1,0,1], # y\n # [0,0,1,1]]).transpose() # z\n # origin,x,y,z = (T*vectors).transpose()\n povObjs.append(povObj(obj))\n \n #print tuple(povObjs)\n f.write(*tuple(povObjs))\n f.close()\n #sphere1 = pov.Sphere( (1,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n #sphere2 = pov.Sphere( (0,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n # composite2 = None#pov.Difference(sphere1, sphere2)\n # \n \n \n \n \n \n # f.write( cam, composite2, light )\n # f.close()\n subprocess.call(\"povray +H2400 +W3200 demo.pov\", shell=True)\n os.system(\"open demo.png\")\n \"\"\"", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n 
self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def assets():", "def iter_links(self):", "def define_materials():\n global robot\n robot.add_material(ur.Material('Black', ur.Color(0.1, 0.1, 0.1, 1)))\n robot.add_material(ur.Material('LightGrey', ur.Color(0.9, 0.9, 0.9, 1)))\n robot.add_material(ur.Material('Grey', ur.Color(0.6, 0.6, 0.6, 1)))\n robot.add_material(ur.Material('DarkGrey', ur.Color(0.3, 0.3, 0.3, 1)))", "def exportMasterLayerSettings(self):\n\t\tmaster = rlayer.RenderLayer( 'defaultRenderLayer' )\n\t\tmaster.makeCurrent()\n\t\tmasterData = {}\n\t\tnodes = ['defaultArnoldRenderOptions','defaultResolution','defaultRenderGlobals']\n\t\tmnNodes =[ mn.Node( n ) for n in nodes ]\n\t\tfor n in mnNodes:\n\t\t\tfor a in n.listAttr( se = True, v = True, w = True ):\n\t\t\t\ttry:\n\t\t\t\t\tmasterData[a] = a.v\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\tpickle.dump( masterData, open( self.masterPath.path, \"wb\" ) )", "def display_link_effect(self):\n\n self.bbox.flash()", "def transfer_landmarks(source_mesh, landmarks, target_mesh, out):\n import os\n import lacecore\n from .landmarks.landmarker import Landmarker\n from .landmarks.serialization import dump_landmarks\n\n landmarker = Landmarker.load(source_mesh_path=source_mesh, landmark_path=landmarks)\n\n for target_mesh_path in target_mesh:\n m = lacecore.load_obj(target_mesh_path, triangulate=True)\n landmarks_on_target_mesh = landmarker.transfer_landmarks_onto(m)\n if out is None:\n filename, _ = os.path.splitext(os.path.basename(target_mesh_path))\n out = filename + \".json\"\n dump_landmarks(landmarks_on_target_mesh, out)", "def make_links(self):\n for filepath in list(self):\n self.make_link(filepath)", "def send_scene_informations(self):\n self.send_player_position()\n self.send_player_direction()\n self.send_grafik_objects()", "def objects_to_bmesh(objs, transform=True):\n\n # CAUTION: Removes/destroys custom layer props\n\n # Creates the mesh used to merge the entire scene\n bm_all = bmesh.new()\n\n # Adds the objects\" meshes to the bmesh\n for obj in objs:\n dprint(\"Preparing object {} for export...\".format(obj.name))\n # Creates a bmesh from the supplied object\n bm = bmesh.new()\n bm.from_mesh(obj.data)\n\n # Makes sure all layers exist so values don't get lost while exporting\n uv_layer = bm.loops.layers.uv.get(\"UVMap\")\n tex_layer = bm.faces.layers.tex.get(\"UVMap\")\n vc_layer = (bm.loops.layers.color.get(\"Col\") or\n bm.loops.layers.color.new(\"Col\"))\n env_layer = (bm.loops.layers.color.get(\"Env\") or\n bm.loops.layers.color.new(\"Env\"))\n env_alpha_layer = (bm.faces.layers.float.get(\"EnvAlpha\") or\n bm.faces.layers.float.new(\"EnvAlpha\"))\n va_layer = (bm.loops.layers.color.get(\"Alpha\") or\n bm.loops.layers.color.new(\"Alpha\"))\n texnum_layer = bm.faces.layers.int.get(\"Texture Number\")\n type_layer = (bm.faces.layers.int.get(\"Type\") or\n bm.faces.layers.int.new(\"Type\"))\n material_layer = (bm.faces.layers.int.get(\"Material\") or\n bm.faces.layers.int.new(\"Material\"))\n\n # Removes the parent for exporting and applies transformation\n parent = obj.parent\n if parent:\n mat = obj.matrix_world.copy()\n old_mat = obj.matrix_basis.copy()\n obj.parent = None\n obj.matrix_world = mat\n\n spc = obj.matrix_basis\n bmesh.ops.scale(\n bm,\n vec=obj.scale,\n space=spc,\n verts=bm.verts\n )\n if transform:\n bmesh.ops.transform(\n bm,\n matrix=Matrix.Translation(obj.location),\n space=spc,\n verts=bm.verts\n )\n 
bmesh.ops.rotate(\n bm,\n cent=obj.location,\n matrix=obj.rotation_euler.to_matrix(),\n space=spc,\n verts=bm.verts\n )\n\n # Restores the parent relationship\n if parent and not obj.parent:\n obj.parent = parent\n obj.matrix_basis = old_mat\n\n # Converts the transformed bmesh to mesh\n new_mesh = bpy.data.meshes.new(\"ncp_export_temp\")\n bm.to_mesh(new_mesh)\n\n # Adds the transformed mesh to the big bmesh\n bm_all.from_mesh(new_mesh)\n\n # Removes unused meshes\n bpy.data.meshes.remove(new_mesh, do_unlink=True)\n bm.free()\n\n return bm_all", "def write_fbx_meshes(meshes, outf_name):\n import FbxCommon\n global n\n (sdk, scene) = FbxCommon.InitializeSdkObjects()\n\n docInfo = FbxDocumentInfo.Create(sdk, 'DocInfo')\n docInfo.Original_ApplicationVendor.Set('Google')\n docInfo.Original_ApplicationName.Set('Tilt Brush')\n docInfo.LastSaved_ApplicationVendor.Set('Google')\n docInfo.LastSaved_ApplicationName.Set('Tilt Brush')\n scene.SetDocumentInfo(docInfo)\n \n contentid=os.path.splitext(outf_name)[0]\n\n if EXPORT_RELOCATION==1 or RELOCATE_BRUSHES is False:\n for mesh in meshes:\n add_mesh_to_scene(sdk, scene, mesh,contentid)\n FbxCommon.SaveScene(sdk, scene, \"ALLINONE\")\n \n if EXPORT_RELOCATION==2:\n #BRUSH IN INDIVIDUAL FILE\n mesh=meshes\n add_mesh_to_scene(sdk, scene, mesh,contentid)\n FbxCommon.SaveScene(sdk, scene, mesh.brush_name+\"_\"+str(mesh.c[0])+\"_\"+contentid+\"_\"+str(n))", "def makeLibrary(self):\n #------------------------------------------ Instance for the output file\n outputFile = open(\"%s/%s\" % (self.sceneryPath,self.libTxtFileName),\"w\")\n #------------------------------------------------------ write the header\n for line in self.header:\n outputFile.write(\"%s\\n\" % (line))\n #------------------------------------------------- Loop over all folders\n packageContent = os.walk(self.sceneryPath)\n for folder in packageContent:\n for fileName in folder[2]:\n fileType = fileName.split(\".\")[-1]\n if fileType in self.objectTypes:\n realPath = folder[0][len(self.sceneryPath)+1:].replace(\"\\\\\",\"/\")\n filePath = \"%s/%s\" % (realPath,fileName)\n print filePath\n outputFile.write(\"EXPORT %s%s %s%s\\n\" % (self.libPrefix,filePath,self.realPathPrefix,filePath))\n outputFile.close()", "def objects(self):", "def _export_reference_representations(self):\n\n self.logger.msg1(\"Saving reference representations\")\n general_refset, _ = get_refsets(self.dbpath)\n general_refset.save(self.rootpath+\"-references\", \"phenotype\")", "def __init__(self, scene = base.render, ambient = 0.2, hardness = 16, fov = 40, near = 10, far = 100):\n \n # Read and store the function parameters\n self.scene = scene\n self.__ambient = ambient\n self.__hardness = hardness\n \n # By default, mark every object as textured.\n self.flagTexturedObject(self.scene)\n \n # Create the buffer plus a texture to store the output in\n buffer = createOffscreenBuffer(-3)\n depthmap = Texture()\n buffer.addRenderTexture(depthmap, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)\n \n # Set the shadow filter if it is supported\n if(base.win.getGsg().getSupportsShadowFilter()):\n depthmap.setMinfilter(Texture.FTShadow)\n depthmap.setMagfilter(Texture.FTShadow) \n \n # Make the camera\n self.light = base.makeCamera(buffer)\n self.light.node().setScene(self.scene)\n self.light.node().getLens().setFov(fov)\n self.light.node().getLens().setNearFar(near, far)\n\n # Put a shader on the Light camera.\n lci = NodePath(PandaNode(\"lightCameraInitializer\"))\n lci.setShader(loader.loadShader(\"caster.sha\"))\n 
self.light.node().setInitialState(lci.getState())\n\n # Put a shader on the Main camera.\n mci = NodePath(PandaNode(\"mainCameraInitializer\"))\n mci.setShader(loader.loadShader(\"softshadow.sha\"))\n base.cam.node().setInitialState(mci.getState())\n\n # Set up the blurring buffers, one that blurs horizontally, the other vertically\n #blurXBuffer = makeFilterBuffer(buffer, \"Blur X\", -2, loader.loadShader(\"blurx.sha\"))\n #blurYBuffer = makeFilterBuffer(blurXBuffer, \"Blur Y\", -1, loader.loadShader(\"blury.sha\"))\n\n # Set the shader inputs\n self.scene.setShaderInput(\"light\", self.light)\n #self.scene.setShaderInput(\"depthmap\", blurYBuffer.getTexture())\n self.scene.setShaderInput(\"depthmap\", buffer.getTexture())\n self.scene.setShaderInput(\"props\", ambient, hardness, 0, 1)", "def setup_lights(self, settings):\n\n for light in settings.lights: # for each light listed in yaml file\n lst = Light(light, settings.lights, settings) # create a Light instance with settings\n self.lights.append(lst) # add it to the list of lights", "def writePointLight(self,view,renderer):\n # get location, color, power\n pl = view.Source.PropertiesList\n\n try:\n location = view.Source.Location\n color = view.Source.Color\n except AttributeError:\n FreeCAD.Console.PrintError(translate(\"Render\",\"Cannot render Point Light: Missing location and/or color attributes\"))\n return \"\"\n power = getattr(view.Source,\"Power\",60) # We accept missing Power (default value: 60)...\n\n # send everything to renderer module\n return renderer.writePointLight(view,location,color,power)", "def link_dihedra(self, verbose: bool = ...) -> None:\n ...", "def _publish_objects(self):\n\n for obj in self._cozmo.world.visible_objects:\n now = rospy.Time.now()\n x = obj.pose.position.x * 0.001\n y = obj.pose.position.y * 0.001\n z = obj.pose.position.z * 0.001\n q = (obj.pose.rotation.q1, obj.pose.rotation.q2, obj.pose.rotation.q3, obj.pose.rotation.q0)\n self._tfb.send_transform(\n (x, y, z), q, now, 'cube_' + str(obj.object_id), self._odom_frame\n )\n \n try:\n if obj.cube_id and self.target_cube != obj:\n self._tfb.send_transform((x, y, z), q, now, 'cube_' + str(obj.object_id), self._odom_frame)\n print(\"Found {}\".format(obj.cube_id))\n if not self.cube_found and self.robots_distance_to_object(self._cozmo, obj) < 400:\n self.target_cube = obj\n self.cube_found = True\n print(\"Locking on to {}\".format(obj.cube_id))\n else:\n if self.cube_found:\n print(\"Found that one already!\")\n else:\n print(\"Cube too far away!\")\n \n except:\n # print('OBJECT IS NOT A LIGHT CUBE')\n if(obj==self._cozmo.world.charger):\n return\n if(obj.object_type==CustomObjectTypes.CustomType00 and (self.front_wall_pose == None or not self.front_wall_pose.is_accurate)):\n self.front_wall_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Front', self._odom_frame)\n print('*** Comzmo has found the front wall! ***')\n if(obj.object_type==CustomObjectTypes.CustomType01 and (self.ramp_bottom_pose == None or not self.ramp_bottom_pose.is_accurate)):\n self.ramp_bottom_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Ramp', self._odom_frame)\n print('*** Comzmo has found the front wall! ***')\n if(obj.object_type==CustomObjectTypes.CustomType03 and (self.drop_spot_pose == None or not self.drop_spot_pose.is_accurate)):\n self.drop_spot_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Drop', self._odom_frame)\n print('*** Comzmo has found the drop Spot! 
***')\n if(obj.object_type==CustomObjectTypes.CustomType04 and (self.back_wall_pose == None or not self.back_wall_pose.is_accurate)):\n self.back_wall_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Back', self._odom_frame)\n print('*** Comzmo has found the back wall! ***')\n if(obj.object_type==CustomObjectTypes.CustomType05 and (self.drop_target_pose == None or not self.drop_target_pose.is_accurate)):\n self.drop_target_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Target', self._odom_frame)\n print('*** Comzmo has found the Dropt Target! ***')\n if(obj.object_type==CustomObjectTypes.CustomType06 and (self.drop_clue_pose == None or not self.drop_clue_pose.is_accurate)):\n self.drop_clue_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Clue', self._odom_frame)\n print('*** Comzmo has found the Dropt Clue! ***')", "def _on_collections_export(self, evt=None):\n \n # remove old exports\n for name in os.listdir(self._library.library_path):\n if EXPORT_PATTERN.match(name):\n os.remove(os.path.join(self._library.library_path, name))\n \n # get collections\n collections = self._library.search(core.Query(\"\", core.Collection.NAME))\n collections = [c for c in collections if c.export]\n \n # export collections\n for collection in collections:\n \n # get query\n if collection.query:\n query = core.Query(collection.query, core.Article.NAME)\n else:\n query = core.Query(\"%s[COLLECTIONID]\" % collection.dbid, core.Article.NAME)\n \n # get articles\n articles = self._library.search(query)\n \n # make export\n text = \"\"\n for article in articles:\n text += article.format(\"PDF: [PDF]\\n[TI]\\n[AU]\\n[CI]\\n\\n\")\n \n # init filename and path\n filename = \"_export_\"\n filename += collection.title.replace(\" \", \"_\")\n filename += \".txt\"\n path = os.path.join(self._library.library_path, filename)\n \n # save to file\n with open(path, 'w', encoding=\"utf-8\") as export:\n export.write(text)", "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def export(self):\n self.output = []\n # This is not the best way of catching errors, but timesketch_utils will be\n # deprecated soon.\n # TODO: Consider using the official Timesketch python API.\n if not self.timesketch_api.session:\n return\n self.timesketch_api.export_artifacts(self.paths, self.sketch_id)\n self.sketch_url = self.timesketch_api.get_sketch_url(self.sketch_id)\n self.console_out.StdOut(\n 'Your Timesketch URL is: {0:s}'.format(self.sketch_url))\n self.output.append(self.sketch_url)", "def export_mesh(remote, path):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_ExportMeshFile_CurrentSelection(path)\n remote.runCommand(cmd)", "def loadMultiple(method, *args):\n\n ### Declaring attributes\n selectedCurve = selectedMesh = None\n minRangeX = minRangeY = minRangeZ = maxRangeX = maxRangeY = maxRangeZ = 0\n selectedObjects = []\n\n ### Query UI values\n # Choise between standin / assembly\n selectedRadio = cmds.radioCollection(loadMethodRadio, query=True, select=True)\n # List of all asset icons on UI\n objectIconsList = cmds.layout(objectScroll, query=True, childArray=True)\n # Amount of copies\n buildingAmount = cmds.intSliderGrp(SpawnObjectsTab.BuildingAmount, query=True, value=True)\n # Deviation from original rotation\n rotationVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomRotation, query=True, value=True)\n 
# Deviation from original scale\n scaleVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomScale, query=True, value=True)\n\n ### Iterate over each asset icon\n for obj in objectIconsList:\n\n # Append to list if the asset is selected\n isSelected = cmds.iconTextCheckBox(obj, query=True, value=True)\n\n if isSelected:\n selectedObjects.append(cmds.iconTextCheckBox(obj, query=True, label=True))\n\n # Exit the function if no asset is selected\n if not selectedObjects:\n return\n \n # Reference to the function that will scatter the copies\n scatteringFunction = None\n\n ### The user chose \"curve\"\n if method == \"curve\":\n \n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnCurve\n\n # Get curve reference\n selectedCurve = cmds.ls(selection=True)\n if not selectedCurve:\n return\n selectedCurve = selectedCurve[0]\n\n ### The user chose \"range\"\n if method == \"range\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnRange\n\n # Query minimum values from floatField\n minValues = cmds.floatFieldGrp(SpawnObjectsTab.MinimumField, query=True, value=True)\n minRangeX, minRangeY, minRangeZ = minValues[0], minValues[1], minValues[2]\n # Query maximum values from floatField\n maxValues = cmds.floatFieldGrp(SpawnObjectsTab.MaximumField, query=True, value=True)\n maxRangeX, maxRangeY, maxRangeZ = maxValues[0], maxValues[1], maxValues[2]\n\n ### The user chose \"mesh\"\n if method == \"mesh\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnMesh\n\n # Get reference of selected object\n selectedMesh = cmds.ls(selection=True)\n if not selectedMesh:\n return\n selectedMesh = selectedMesh[0]\n\n # Create group for the spawned copies \n finalGroup = cmds.group(name=\"CurveAssetGroup\", empty=True)\n cmds.select(clear=True)\n\n ### Iterate over the generated positions of the function with given parameters\n # scatteringFunction is a reference to a function in ObjectScattering.py\n # these functions are generators, they yield a value and we can iterate\n # to get the next value generated.\n for position in scatteringFunction(objectCount=buildingAmount, curve=selectedCurve,\n minX=minRangeX, minY=minRangeY, minZ=minRangeZ, maxX=maxRangeX, maxY=maxRangeY, maxZ=maxRangeZ,\n mesh=selectedMesh):\n \n # Randomly instance an asset from the selectedObjects list\n asset = AssetIcon(random.choice(selectedObjects))\n loadedAssetNode = None\n\n # Create copy based on the mode selected by the user\n if \"standin\" in selectedRadio:\n loadedAssetNode = asset.loadArnoldAsset()\n else: \n loadedAssetNode = asset.loadAsset()\n\n # Move this copy to the generated position\n cmds.move(position[0], position[1], position[2], loadedAssetNode, absolute=True)\n\n # If there is a fourth index on the position, that means we have rotation info\n # use that info to rotate the asset.\n # It is used to match an objects rotation to a face normal.\n if len(position) == 4:\n cmds.rotate(position[3][0], position[3][1], position[3][2], loadedAssetNode, absolute=True)\n \n # Add random rotation\n angle = random.uniform(-rotationVariation, rotationVariation)\n cmds.rotate(angle, loadedAssetNode, y=True, relative=True, objectSpace=True)\n\n # Add random scale\n newScale = random.uniform(1, 1+scaleVariation)\n cmds.scale(newScale, newScale, newScale, loadedAssetNode, absolute=True)\n\n #cmds.FreezeTransformations(loadedAssetNode)\n\n # Parent copy to group\n cmds.parent(loadedAssetNode, finalGroup)", "def export_embeddings(self):\n 
save_path = self.config.path_embeddings / self.model.model_name\n save_path.mkdir(parents=True, exist_ok=True)\n \n idx2ent = self.model.config.knowledge_graph.read_cache_data('idx2entity')\n idx2rel = self.model.config.knowledge_graph.read_cache_data('idx2relation')\n\n\n series_ent = pd.Series(idx2ent)\n series_rel = pd.Series(idx2rel)\n series_ent.to_pickle(save_path / \"ent_labels.pickle\")\n series_rel.to_pickle(save_path / \"rel_labels.pickle\")\n\n with open(str(save_path / \"ent_labels.tsv\"), 'w') as l_export_file:\n for label in idx2ent.values():\n l_export_file.write(label + \"\\n\")\n\n with open(str(save_path / \"rel_labels.tsv\"), 'w') as l_export_file:\n for label in idx2rel.values():\n l_export_file.write(label + \"\\n\")\n\n for parameter in self.model.parameter_list:\n all_ids = list(range(0, int(parameter.shape[0])))\n stored_name = parameter.name.split(':')[0]\n # import pdb; pdb.set_trace()\n\n if len(parameter.shape) == 2:\n all_embs = parameter.numpy()\n with open(str(save_path / (\"%s.tsv\" % stored_name)), 'w') as v_export_file:\n for idx in all_ids:\n v_export_file.write(\"\\t\".join([str(x) for x in all_embs[idx]]) + \"\\n\")\n\n df = pd.DataFrame(all_embs)\n df.to_pickle(save_path / (\"%s.pickle\" % stored_name))", "def _add_lamp_outlets(self):\r\n lst = self.model.get_all_lamp_outlets()\r\n\r\n for itm in lst:\r\n self._add_lamp_outlet(itm)", "def view_registry(self) -> None:\n\n arr = self.load_links()[0]\n for i,v in enumerate(arr):\n print(f\"<{i}: {v}>\\n\")\n pass", "def handout_links(self):\r\n return self.q(css='section.handouts ol li a').map(lambda el: el.get_attribute('href')).results", "def turn_on_lights(bridge):\n for light in bridge.lights:\n bridge.set_light(light.light_id, {'ct': 350, 'bri': 254, 'on': True})", "def reference_scene(file_path, **kwargs):\n\n pass", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def export_layers(self, dest, show):\n doc = copy.deepcopy(self.document)\n for layer in doc.xpath('//svg:g[@inkscape:groupmode=\"layer\"]', namespaces=inkex.NSS):\n layer.attrib['style'] = 'display:none'\n id = layer.attrib[\"id\"]\n if id in show:\n layer.attrib['style'] = 'display:inline'\n\n doc.write(dest)", "def on_exit(self, next_scene):", "def onExport( self ):\n self.__exportButton.text = \"Working...\"\n slicer.app.processEvents()\n\n outputDir = os.path.abspath( self.__dirButton.directory )\n outputFile = os.path.join( outputDir, 'index.html' )\n\n try:\n output = self.logic.export( self.__captionCombobox.currentIndex, self.__copyCheckbox.checked, outputDir )\n except Exception as e:\n # maybe the scene was not saved?\n qt.QMessageBox.warning( None, 'Error', 'Please make sure the scene was saved before attempting to export to WebGL!' 
)\n self.__exportButton.text = \"Export to WebGL\"\n return\n\n if self.__serverCheckbox.checked and useWebserver:\n # start server\n os.chdir( outputDir )\n\n # if we have already a httpd running, kill it now\n # it will likely leave an orphaned process but since we mark it killed,\n # slicer will destroy it on exit\n if self.__httpd:\n self.__p.terminate()\n # increase the port\n self.__port += 1\n\n # check first if the port is available (since we open it as a new process we can not check later)\n portFree = False\n while not portFree:\n try:\n s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n s.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )\n s.bind( ( \"\", self.__port ) )\n except socket.error, e:\n portFree = False\n self.__port += 1\n finally:\n s.close()\n portFree = True\n\n # we need to break out of the pythonQt context here to make multiprocessing work\n import sys\n sys.stdin = sys.__stdin__\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n self.__handler = SimpleHTTPServer.SimpleHTTPRequestHandler\n self.__httpd = SocketServer.TCPServer( ( \"\", self.__port ), self.__handler )\n self.__p = m.Process( target=self.__httpd.serve_forever )\n self.__p.start()\n\n url = 'http://localhost:' + str( self.__port ) + '/index.html'\n else:\n # no server\n url = outputFile\n\n with open( outputFile, 'w' ) as f:\n f.write( output )\n\n self.__exportButton.text = \"Export to WebGL\"\n\n if self.__viewCheckbox.checked:\n time.sleep( 1 )\n webbrowser.open_new_tab( url )", "def main():\n GRAPH = lambda_graph()\n GRAPH.save_graph(\"pylon\")\n meshName = \"pylon.mesh\"\n cmd = \"./population/linuxShow \"+meshName\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n process.communicate()\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()", "def get_meshes(scene):\r\n # all the instances we want to duplicate and change the source\r\n instances = []\r\n # the mesh we want to use as the new source\r\n replacement_mesh = None\r\n # the original mesh\r\n original_mesh = None\r\n\r\n for item in scene.selected:\r\n if item.isAnInstance:\r\n instances.append(item)\r\n original_mesh = item.itemGraph(\"meshInst\").connectedItems[\"Reverse\"][0]\r\n else:\r\n replacement_mesh = item\r\n return [instances, replacement_mesh, original_mesh]", "def _open_output_files(self):\n self.links_outfile = open(self.opts.links_outfile, 'wb')", "def planes_with_light_profiles(tracer):\n # NOTE: Find all planes with light profiles\n # NOTE:\n # # image = tracer.galaxies[1].profile_image_from_grid(grid=grid)\n # # plt.figure()\n # # plt.imshow(image.in_2d)\n # # plt.show()\n #\n # # asd = list(map(lambda plane: plane.has_light_profile, tracer.planes))\n # # print(asd)\n # #print(tracer.planes)\n #\n # #print(tracer.has_light_profile)\n # #print(list(map(lambda plane: plane.has_light_profile, tracer.planes)))\n # #print(tracer.galaxies_with_light_profile)\n #\n # #print(tracer.planes[1].galaxies_with_light_profile)\n #\n # galaxies = tracer.planes[1].galaxies_with_light_profile\n # galaxy = galaxies[0]\n #\n # galaxy_light_profiles = galaxy.light_profiles\n #\n # image_0 = galaxy_light_profiles[0].profile_image_from_grid(grid=grid)\n # image_0_in_2d = image_0.in_2d\n #\n # image_1 = galaxy_light_profiles[1].profile_image_from_grid(grid=grid)\n # image_1_in_2d = image_1.in_2d", "def WriteImport(self, filename, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):\r\n step = os.path.basename(outputDir)\r\n 
execution = os.path.basename(os.path.dirname(outputDir))\r\n test = os.path.basename(os.path.dirname(os.path.dirname(outputDir)))\r\n path = os.path.join(self.__scenesDir, test, execution, step)\r\n if (not os.path.isdir(path)):\r\n os.makedirs(path)\r\n self.__pathMap.append((path, outputDir))\r\n \r\n self.__logFiles.append(os.path.join(path, os.path.basename(logname)))\r\n self.__importLogFiles.append(self.__logFiles[-1])\r\n \r\n command = (\"SetValue \\\"preferences.scripting.cmdlogfilename\\\", \\\"\" + \r\n self.__logFiles[-1].replace(\"\\\\\", \"\\\\\\\\\") + \"\\\"\\n\"\r\n \"NewScene, false\\n\")\r\n if (FUtils.GetExtension(filename) == \"dae\"):\r\n command = (command + \r\n \"set myIProp = CreateImportFTKOptions()\\n\" +\r\n \"myIProp.Parameters(\\\"Filename\\\").Value = \\\"\" + \r\n filename.replace(\"\\\\\", \"\\\\\\\\\") +\"\\\"\\n\" +\r\n \"myIProp.Parameters(\\\"Verbose\\\").Value = True\\n\")\r\n for setting in settings:\r\n value = setting.GetValue().strip()\r\n if (value == \"\"):\r\n value = self.FindDefault(FXsi.__IMPORT_OPTIONS, \r\n setting.GetPrettyName())\r\n command = (command + \"myIProp.Parameters(\\\"\" + \r\n setting.GetCommand() + \"\\\").Value = \" + value + \"\\n\")\r\n command = command + \"ImportFTK myIProp.Name \\n\"\r\n elif (FUtils.GetExtension(filename) == \"scn\"):\r\n command = (command +\r\n \"OpenScene \\\"\" + filename.replace(\"\\\\\",\"\\\\\\\\\") + \"\\\"\\n\")\r\n else: \r\n return\r\n \r\n self.__currentImportProperName = FUtils.GetProperFilename(filename)\r\n basename = self.__currentImportProperName + \".scn\"\r\n\r\n# self.__script.write(\r\n# command +\r\n# \"SearchAndReplacePath \\\"All\\\", \\\"\" + FXsi.__REPLACE_PATH + \r\n# \"\\\", \\\"\" + \r\n# os.path.dirname(filename).replace(\"\\\\\", \"\\\\\\\\\") + \r\n# \"\\\", True\\n\" +\r\n# \"SaveSceneAs \\\"\" + \r\n# os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") +\r\n# \"\\\"\\n\"\r\n# )\r\n \r\n self.__script.write(\r\n command +\r\n \"SaveSceneAs \\\"\" + \r\n os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") +\r\n \"\\\"\\n\"\r\n )\r\n \r\n self.__testCount = self.__testCount + 1\r\n \r\n return [basename,]", "def logosmall(self):\n try:\n asset = self.app.module_map.uploader.get(self.barcamp.logo)\n except AssetNotFound:\n asset = None\n if not asset:\n return u\"\"\n v = asset.variants['medium_user']\n url = self.app.url_for(\"asset\", asset_id = v._id)\n return \"\"\"<a href=\"%s\"><img src=\"%s\" width=\"%s\" height=\"%s\"></a>\"\"\" %(\n self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug),\n url,\n v.metadata['width'],\n v.metadata['height'])", "def flicker_lights(self):\n if self.lighting:\n self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def flicker_lights(self):\n if self.lighting:\n self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def afterLoadSceneObject(self):\n\t\tpass", "def extract_intrinsic_images_from_lighting_passes(self, outdir, vis=False):\n logger_name = thisfile + '->extract_intrinsic_images_from_lighting_passes()'\n xm.general.makedirs(outdir)\n data = self.data\n\n def collapse_passes(components):\n ch_arrays = []\n for ch in ['R', 'G', 'B']:\n comp_arrs = []\n for comp in components:\n comp_arrs.append(data[comp + '.' 
+ ch])\n ch_array = np.sum(comp_arrs, axis=0) # sum components\n ch_arrays.append(ch_array)\n # Handle alpha channel\n first_alpha = data[components[0] + '.A']\n for ci in range(1, len(components)):\n assert (first_alpha == data[components[ci] + '.A']).all(), \\\n \"Alpha channels of all passes must be the same\"\n ch_arrays.append(first_alpha)\n return np.dstack(ch_arrays)\n\n # Albedo\n albedo = collapse_passes(['diffuse_color', 'glossy_color'])\n np.save(join(outdir, 'albedo.npy'), albedo)\n if vis:\n xm.vis.matrix_as_image(albedo, outpath=join(outdir, 'albedo.png'))\n # Shading\n shading = collapse_passes(['diffuse_indirect', 'diffuse_direct'])\n np.save(join(outdir, 'shading.npy'), shading)\n if vis:\n xm.vis.matrix_as_image(shading, join(outdir, 'shading.png'))\n # Specularity\n specularity = collapse_passes(['glossy_indirect', 'glossy_direct'])\n np.save(join(outdir, 'specularity.npy'), specularity)\n if vis:\n xm.vis.matrix_as_image(specularity, join(outdir, 'specularity.png'))\n # Reconstruction vs. ...\n recon = np.multiply(albedo, shading) + specularity\n recon[:, :, 3] = albedo[:, :, 3] # can't add up alpha channels\n np.save(join(outdir, 'recon.npy'), recon)\n if vis:\n xm.vis.matrix_as_image(recon, join(outdir, 'recon.png'))\n # ... composite from Blender, just for sanity check\n composite = collapse_passes(['composite'])\n np.save(join(outdir, 'composite.npy'), composite)\n if vis:\n xm.vis.matrix_as_image(composite, join(outdir, 'composite.png'))\n logger.name = logger_name\n logger.info(\"Intrinsic images extracted to %s\", outdir)", "def rtsobjects():\n pass", "def render_and_save():\n\n rendering_config = configuration.get_config()\n rendering_config = ml_collections.FrozenConfigDict(rendering_config)\n aspect_ratio = rendering_config.aspect_ratio\n height = rendering_config.height\n width = int(aspect_ratio * height)\n\n scene_camera = build_camera(rendering_config, aspect_ratio)\n world = build_world(rendering_config)\n\n # Render.\n logging.info(\"Tracing rays...\")\n render_image_fn = jax.jit(\n render.generate_image,\n static_argnames=[\"height\", \"width\", \"config\"])\n image = render_image_fn(height, width, scene_camera, world, rendering_config)\n\n image = render.correct_gamma(image, gamma=rendering_config.gamma_correction)\n\n logging.info(\"Saving to file...\")\n output.export_as_ppm(image, rendering_config.output_file)\n\n return image", "def WriteExport(self, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):\r\n if (self.__currentImportProperName == None): return\r\n \r\n step = os.path.basename(outputDir)\r\n execution = os.path.basename(os.path.dirname(outputDir))\r\n test = os.path.basename(os.path.dirname(os.path.dirname(outputDir)))\r\n path = os.path.join(self.__scenesDir, test, execution, step)\r\n if (not os.path.isdir(path)):\r\n os.makedirs(path)\r\n self.__pathMap.append((path, outputDir))\r\n \r\n basename = self.__currentImportProperName + \".dae\"\r\n \r\n command = \"\"\r\n \r\n for setting in settings:\r\n value = setting.GetValue().strip()\r\n if (value == \"\"):\r\n value = self.FindDefault(FXsi.__EXPORT_OPTIONS, \r\n setting.GetPrettyName())\r\n command = (command + \"myEProp.Parameters(\\\"\" + \r\n setting.GetCommand() + \"\\\").Value = \" + \r\n setting.GetValue() + \"\\n\")\r\n \r\n self.__logFiles.append(os.path.join(path, os.path.basename(logname)))\r\n \r\n self.__script.write(\r\n \"SetValue \\\"preferences.scripting.cmdlogfilename\\\", \\\"\" + \r\n self.__logFiles[-1].replace(\"\\\\\", \"\\\\\\\\\") + 
\"\\\"\\n\"\r\n \"set myEProp = CreateExportFTKOptions()\\n\"\r\n \"myEProp.Parameters(\\\"Filename\\\").Value = \\\"\" + \r\n os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") + \r\n \"\\\"\\n\" +\r\n \"myEProp.Parameters(\\\"Format\\\").Value = 1\\n\"\r\n \"myEProp.Parameters(\\\"Verbose\\\").Value = True\\n\" +\r\n command +\r\n \"ExportFTK myEProp.Name\\n\"\r\n )\r\n \r\n return [basename,]", "def light_sync(self):", "def import_scene(file_path):\n\n pass", "def writeVRMLFile(objects, filepath, used_color_keys, licence_info=None):\n used_colors = None\n if used_color_keys is not None:\n used_colors = { x: shaderColors.named_colors[x] for x in used_color_keys }\n say(used_color_keys)\n say(used_colors.values())\n with open(filepath, 'w') as f:\n # write the standard VRML header\n f.write(\"#VRML V2.0 utf8\\n#kicad StepUp wrl exported\\n\\n\")\n if licence_info is not None:\n for line in licence_info:\n f.write('# '+line + '\\n')\n for shader_color in used_colors.values():\n f.write(shader_color.toVRMLdefinition())\n\n for obj in objects:\n f.write(\"Shape { geometry IndexedFaceSet \\n{ coordIndex [\")\n # write coordinate indexes for each face\n f.write(','.join(\"%d,%d,%d,-1\" % f for f in obj.faces))\n f.write(\"]\\n\") # closes coordIndex\n f.write(\"coord Coordinate { point [\")\n # write coordinate points for each vertex\n #f.write(','.join('%.3f %.3f %.3f' % (p.x, p.y, p.z) for p in obj.points))\n f.write(','.join('%.3f %.3f %.3f' % (p.x, p.y, p.z) for p in obj.points))\n f.write(\"]\\n}\") # closes Coordinate\n #shape_col=(1.0, 0.0, 0.0)#, 0.0)\n f.write(\"}\\n\") # closes points\n\n #say(color_list_mat[col_index])\n if not isinstance(obj.color,basestring) or isinstance(used_colors, basestring):\n shape_transparency=obj.transp\n f.write(\"appearance Appearance{material Material{diffuseColor %f %f %f\\n\" % obj.color)\n f.write(\"transparency %f}}\" % shape_transparency)\n else:\n #say(obj.color)\n f.write(used_colors[obj.color].toVRMLuseColor())\n f.write(\"}\\n\") # closes shape\n say(filepath+' written')", "def linking_library_dirs(self):", "def update_objects_and_lights(latents, material_names, update_lights):\n\n objects_latents = np.array_split(latents, (len(latents) - 1) // 8)\n\n max_object_size = max(\n [max(o.dimensions) for o in bpy.data.objects if \"Object_\" in o.name]\n )\n\n for i, (object_latents, material_name) in enumerate(\n zip(objects_latents, material_names)\n ):\n # find correct object name\n object_name = None\n for obj in bpy.data.objects:\n if obj.name.endswith(f\"Object_{i}\"):\n object_name = obj.name\n break\n assert object_name is not None\n\n # update object location and rotation\n object = bpy.data.objects[object_name]\n object.location = (\n object_latents[0],\n object_latents[1],\n object_latents[2] + max_object_size / 2,\n )\n object.rotation_euler = tuple(object_latents[3:6])\n\n # update object color\n rgba_object = colorsys.hsv_to_rgb(\n object_latents[7] / (2.0 * np.pi), 1.0, 1.0\n ) + (1.0,)\n render_utils.change_material(\n bpy.data.objects[object_name].data.materials[-1], Color=rgba_object\n )\n\n if update_lights:\n # update light color\n rgb_light = colorsys.hsv_to_rgb(object_latents[8] / (2.0 * np.pi), 0.8, 1.0)\n bpy.data.objects[f\"Spotlight_Object_{i}\"].data.color = rgb_light\n # update light location\n bpy.data.objects[f\"Spotlight_Object_{i}\"].location = (\n 4 * np.sin(object_latents[6]),\n 4 * np.cos(object_latents[6]),\n 6 + max_object_size,\n )" ]
[ "0.780936", "0.6795471", "0.5878799", "0.5863147", "0.5783751", "0.57724273", "0.5699046", "0.5667319", "0.5579326", "0.5527161", "0.5456355", "0.54548293", "0.5452703", "0.5451087", "0.54337513", "0.5404285", "0.5384086", "0.5363154", "0.53607535", "0.53221077", "0.53212357", "0.52984256", "0.5261833", "0.5256882", "0.5253868", "0.5237455", "0.52366996", "0.52057", "0.5196203", "0.51946366", "0.51774764", "0.51668984", "0.5158538", "0.51309663", "0.51290226", "0.5119282", "0.50969034", "0.50713664", "0.5070472", "0.504567", "0.5037904", "0.5000262", "0.49981457", "0.49980685", "0.4994085", "0.4982695", "0.49668178", "0.4958057", "0.49489543", "0.49420395", "0.49415457", "0.49368656", "0.49249855", "0.49247292", "0.49163055", "0.49124324", "0.49082434", "0.49002635", "0.48943403", "0.489211", "0.4886526", "0.48841178", "0.48808494", "0.48546433", "0.4851545", "0.4847662", "0.4847406", "0.48324382", "0.4824714", "0.48207748", "0.4817217", "0.48093247", "0.48004574", "0.47993854", "0.47981822", "0.47944412", "0.47942105", "0.4792018", "0.47919846", "0.47897425", "0.47872633", "0.47849095", "0.4783177", "0.47831216", "0.4776635", "0.47663248", "0.4765313", "0.47575077", "0.47551242", "0.47551242", "0.4750936", "0.47508138", "0.4750795", "0.47426763", "0.4739299", "0.47287783", "0.47194415", "0.47150943", "0.47130322", "0.47091648" ]
0.8114935
0
export aovs from scene
def exportAovs(self):
	aovs = mc.ls( typ = 'aiAOV' )
	aovData = {}
	for a in aovs:
		aovData[a] = {}
		aovData[a]['enabled'] = mc.getAttr( a + '.enabled' )
		aovData[a]['name'] = mc.getAttr( a + '.name' )
		aovData[a]['type'] = mc.getAttr( a + '.type' )
	pickle.dump( aovData, open( self.aovsPath.path, "wb" ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def create_scene(self):\n \n self.scene=soya.World()", "def import_scene(file_path):\n\n pass", "def exports():", "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def export( self, captionMode, copyFiles, outputDir ):\n scene = slicer.mrmlScene\n nodes = scene.GetNumberOfNodes()\n\n self.__nodes = {}\n\n # 1 for model name, 2 for parent name\n self.__captionMode = captionMode\n # TRUE if we shall copy the files to the outputDir\n self.__copyFiles = copyFiles\n self.__outputDir = outputDir\n\n self.__tree = Tree()\n self.__tree.create_node( \"Scene\", \"scene\" )\n\n for n in xrange( nodes ):\n\n node = scene.GetNthNode( n )\n\n self.parseNode( node )\n\n [header, footer] = self.configureXrenderers()\n output = header\n output += self.createXtree( \"scene\" )\n output += footer\n\n return output", "def save_scene(force=True, **kwargs):\n\n pass", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def importAovs(self):\n\t\tLayersInfo = pickle.load( open( self.aovsPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tfor ao in LayersInfo.keys():\n\t\t\taov.create( ao, LayersInfo[ao]['name'], LayersInfo[ao]['type'], LayersInfo[ao]['enabled'] )\n\t\tmc.refresh( su = 0 )", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def main():\n viewer = Viewer()\n\n # paramètre de transformation des paramètres\n #sol\n ground_size = 512\n ground_offset = 20\n\n #dinosaure\n characters_offset_x = 0\n characters_offset_y = -20\n characters_offset_z = 0\n characters_scale = 15\n characters_rotate_deg = 180\n\n #forêt\n forest_offset = -15\n forest_scale = 1.5\n\n #skybox\n Skysphere_scale = 3\n\n characters = Node(transform = translate(characters_offset_x, characters_offset_y, characters_offset_z) @ scale(characters_scale) @ rotate(axis=(0, 1, 0), angle = characters_rotate_deg))\n characters.add(*load_skinned(\"dino/Dinosaurus_roar.dae\"))\n\n forest = Node(transform = translate(0, forest_offset, 0) @ scale(forest_scale))\n forest.add(*load_textured(\"trees9/forest.obj\"))\n\n ground = Node(transform = translate(-ground_size>>1, ground_offset, -ground_size>>1))\n ground.add(sol(ground_size))\n\n Skysphere = Node(transform = scale(Skysphere_scale))\n Skysphere.add(*load_textured(\"Skysphere/skysphere.obj\"))\n\n scene = Node(transform = identity(), children = [characters, forest, ground, Skysphere])\n\n viewer.add(scene)\n\n viewer.run()", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = 
\"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n 
nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ 
scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def pov_render(self, camera_position = (0,0,-10), camera_target = (0,0,0)):\n\n \"\"\"\n f=pov.File(\"demo.pov\",\"colors.inc\",\"stones.inc\")\n \n cam = pov.Camera(location=camera_position, sky=(1,0,1),look_at=camera_target)\n light = pov.LightSource( camera_position, color=\"White\")\n \n povObjs = [cam, light]\n for obj in self.objects[1:]:\n # test coordinate transfroms\n # print M\n # vectors = np.array([[0,0,0,1], #origin\n # [1,0,0,1], # x\n # [0,1,0,1], # y\n # [0,0,1,1]]).transpose() # z\n # origin,x,y,z = (T*vectors).transpose()\n povObjs.append(povObj(obj))\n \n #print tuple(povObjs)\n f.write(*tuple(povObjs))\n f.close()\n #sphere1 = pov.Sphere( (1,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n #sphere2 = pov.Sphere( (0,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n # composite2 = None#pov.Difference(sphere1, sphere2)\n # \n \n \n \n \n \n # f.write( cam, composite2, light )\n # f.close()\n subprocess.call(\"povray +H2400 +W3200 demo.pov\", shell=True)\n os.system(\"open demo.png\")\n \"\"\"", "def open_scene(file_path, save=True):\n\n pass", "def __render_scene(self, scene):\n\n # Name and location of the exported project.\n project_dir = os.path.join(tempfile.gettempdir(), \"blenderseed\", \"render\")\n project_filepath = os.path.join(project_dir, \"render.appleseed\")\n\n # Create target directories if necessary.\n if not os.path.exists(project_dir):\n try:\n os.makedirs(project_dir)\n except os.error:\n self.report({\"ERROR\"}, \"The directory {0} could not be created. Check directory permissions.\".format(project_dir))\n return\n\n # Generate project on disk.\n self.update_stats(\"\", \"appleseed Rendering: Exporting Scene\")\n writer = projectwriter.Writer()\n writer.write(scene, project_filepath)\n\n # Render project.\n self.__render_project_file(scene, project_filepath, project_dir)", "def main():\r\n # create the EdenLudo sample\r\n EdenEvolves = EdenLudo()\r\n # ru-n the scene\r\n run()", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def __init__(self, *args, **kwargs):\n super(MayaScene, self).__init__(*args, **kwargs)", "def exportAssetAssembly(name, rigTopNode, meshTopNode, path, postScript=None):\n if pm.ls(rigTopNode):\n rigTopNode = pm.PyNode(rigTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check your \"\n \"scene\".format(rigTopNode))\n return\n\n if pm.ls(meshTopNode):\n meshTopNode = pm.PyNode(meshTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check \"\n \"your scene\".format(meshTopNode))\n return\n # check the folder and script\n # if the target name exist abort and request another name\n\n deformer_jnts = rigTopNode.rigGroups[3].connections()[0].members()\n if not deformer_jnts:\n pm.displayError(\n \"{} is empty. 
The tool can't find any joint\".format(meshTopNode))\n\n # export connections and cut joint connections\n file_path = os.path.join(path, name + \".jmm\")\n dm_nodes = exportConnections(source=deformer_jnts,\n filePath=file_path,\n disc=True)\n\n # cut al possible remaining connection and adjust hierarchy\n # joint or visibility\n jnt_org = pm.PyNode(\"jnt_org\")\n pm.disconnectAttr(rigTopNode.jnt_vis, jnt_org.visibility)\n\n # restructure model\n model = pm.createNode(\"transform\",\n n=\"model\",\n p=None,\n ss=True)\n pm.addAttr(model, ln=\"rigGroups\", at='message', m=1)\n pm.parent(meshTopNode, jnt_org, model)\n\n # disconnect jnt set\n sets = rigTopNode.listConnections(type=\"objectSet\")\n\n deformersGrp = None\n for oSet in sets:\n if \"deformers_grp\" in oSet.name():\n deformersGrp = oSet\n\n if deformersGrp:\n for cnx in deformersGrp.message.listConnections(p=True):\n pm.disconnectAttr(deformersGrp.message, cnx)\n pm.connectAttr(deformersGrp.message, model.attr(\"rigGroups[0]\"))\n\n # disconnect bindPoses\n dg_poses = rigTopNode.message.listConnections(type=\"dagPose\", p=True)\n for dgp in dg_poses:\n if dgp.node().name().startswith(\"bindPose\"):\n pm.disconnectAttr(rigTopNode.message, dgp)\n\n # post script\n if postScript:\n try:\n exec(compile(open(postScript, \"rb\").read(), postScript, 'exec'))\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n pm.displayError(message)\n cont = pm.confirmBox(\"FAIL: Script Fail\",\n \"Do you want to export anyway?\" + \"\\n\\n\"\n + message + \"\\n\\n\" + traceback.format_exc(),\n \"Continue\", \"Cancel\")\n if not cont:\n pm.undo()\n return\n\n # export rig model\n pm.select(dm_nodes, r=True)\n pm.select(rigTopNode, add=True)\n file_path = os.path.join(path, name + \"_rig.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)\n\n # export mesh and joints\n pm.select(model, r=True)\n file_path = os.path.join(path, name + \"_model.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)", "def scene_name():\n\n pass", "def __init__(self, scene: Scene):\n self.scene = scene", "def __init__(self, scene: Scene):\n super(SceneGUI, self).__init__()\n\n self.scene = scene # save instance of Scene class to this object\n if scene.photons.size == 0:\n raise(Exception, \"no data stored in scene\")\n\n # QImage require data to be 32 bit aligned. 
Thus, we need to make sure out_size is even\n out_size = (round(scene.n_rows * 150/scene.n_cols)*2, 300)\n self.image = imresize(scene.srgb, out_size, interp='nearest')\n\n # set status bar\n self.statusBar().showMessage(\"Ready\")\n\n # set menu bar\n menu_bar = self.menuBar()\n menu_file = menu_bar.addMenu(\"&File\")\n menu_plot = menu_bar.addMenu(\"&Plot\")\n\n # add load scene to file menu\n load_scene = QtGui.QAction(\"Load Scene\", self)\n load_scene.setStatusTip(\"Load scene from file\")\n load_scene.triggered.connect(self.menu_load_scene)\n menu_file.addAction(load_scene)\n\n # add save scene to file menu\n save_scene = QtGui.QAction(\"Save Scene\", self)\n save_scene.setStatusTip(\"Save scene to file\")\n save_scene.setShortcut(\"Ctrl+S\")\n save_scene.triggered.connect(self.menu_save_scene)\n menu_file.addAction(save_scene)\n\n # add illuminant energy to plot menu\n plot_il_energy = QtGui.QAction(\"Illuminant (Energy)\", self)\n plot_il_energy.setStatusTip(\"Plot spectra power distribution of scene illuminant\")\n plot_il_energy.triggered.connect(lambda: self.scene.plot(\"illuminant energy\"))\n menu_plot.addAction(plot_il_energy)\n\n # add illuminant photons to plot menu\n plot_il_quanta = QtGui.QAction(\"Illuminant (Photons)\", self)\n plot_il_quanta.setStatusTip(\"Plot spectra power distribution of scene illuminant\")\n plot_il_quanta.triggered.connect(lambda: self.scene.plot(\"illuminant photons\"))\n menu_plot.addAction(plot_il_quanta)\n\n # set up left panel\n left_panel = self.init_image_panel()\n\n # set up right panel\n right_panel = self.init_control_panel()\n\n splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)\n splitter.addWidget(left_panel)\n splitter.addWidget(right_panel)\n\n QtGui.QApplication.setStyle(QtGui.QStyleFactory().create('Cleanlooks'))\n\n widget = QtGui.QWidget()\n hbox = QtGui.QHBoxLayout(widget)\n hbox.addWidget(splitter)\n\n self.setCentralWidget(widget)\n\n # set size and put window to center of the screen\n self.resize(600, 400)\n qr = self.frameGeometry()\n qr.moveCenter(QtGui.QDesktopWidget().availableGeometry().center())\n self.move(qr.topLeft())\n\n # set title and show\n self.setWindowTitle(\"Scene GUI: \" + scene.name)\n self.show()", "def _export_button_cb(self):\n filename = asksaveasfile(\n mode='w',\n filetypes=(('YAML files', '*.yaml'), ('All files', '*.*'))\n )\n\n if not filename:\n return\n\n with open(filename.name, 'w') as f:\n f.write('obstacles:\\n')\n for obstacle in self.obstacles:\n f.write(f' - {str(obstacle)}')\n f.write('\\n')", "def export_mesh(remote, path):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_ExportMeshFile_CurrentSelection(path)\n remote.runCommand(cmd)", "def my_handler(scene):\n cube_winkel_x = degrees(bpy.data.objects['Cube'].rotation_euler.x)\n cube_winkel_y = degrees(bpy.data.objects['Cube'].rotation_euler.y)\n\n\n # Aktionen auf den servos:\n cube_servo.turnAngle(cube_winkel_x)\n kiefer_servo.turnAngle(cube_winkel_y)", "def getScene():\n #print \"servers direct scenes are \",soya.IDLER.scenes[:]\n \n return soya.IDLER.scenes[0]", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, 
head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! 
Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def test_to_from_scene(self): # pragma: lpy\n super(TestObjDict, self).test_to_from_scene(_as_obj=True)", "def export_onnx():\r\n model = DivideBy255()\r\n X = torch.randn(1, 3, 256, 256, dtype=torch.float)\r\n onnx_name = \"DivideBy255.onnx\"\r\n\r\n print(f\"Generating {onnx_name}\")\r\n torch.onnx.export(\r\n model,\r\n (X),\r\n onnx_name,\r\n opset_version=10,\r\n do_constant_folding=True,\r\n # verbose=True,\r\n # input_names=['Identity_1', 'Identity'],\r\n output_names=['input_1']\r\n )", "def rdmb_povray_save_q(out_file,\n vs,\n ucs, vcs,\n width=800, height=600,\n rotx=0, roty=0, rotz=0,\n angle=14):\n\n ucmax = 6.0\n ucs = ucs / ucmax\n ucs[ucs > 1.0] = 1.0\n # ucs = ucs / np.max(ucs)\n\n rot1 = [rotx, 0, 0]\n rot2 = [0, roty, 0]\n rot3 = [0, 0, rotz]\n\n camera = Camera('location', [0, 0, -25],\n 'look_at', [0, 0, 0],\n 'angle', angle,\n 'right x*image_width/image_height')\n\n light = LightSource([0, 0, -10],\n 'color', [1.0, 1.0, 1.0], 'parallel', 'shadowless')\n light1 = LightSource([-10, 0, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light2 = LightSource([10, 0, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light3 = LightSource([0, -10, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light4 = LightSource([0, 10, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n\n background = Background('color', [1, 1, 1, 1])\n\n spheres = [Sphere(v, 0.02,\n Finish('ambient', 1.0),\n Texture(Pigment('color',\n [0.3+uc*0.7, 0.2+uc*0.8, 0.2+uc*0.8])),\n 'rotate', rot1,\n 'rotate', rot2,\n 'rotate', rot3) for v, uc in zip(vs, ucs)]\n\n objects = [light, light1, light2, light3, light4, background] + spheres\n\n scene = Scene(camera, objects=objects)\n scene.render(out_file, width=width, height=height,\n output_alpha=True, antialiasing=0.001,\n tempfile=out_file+\"__temp__.pov\")", "def visualize(self):\n app = QtGui.QApplication([''])\n SceneGUI(self)\n app.exec_()", "def set_up_scenes():\n cmd.zoom('Cathepsin', 10) # Zoom out to get a view on the whole complex\n cmd.scene('001', 'store', message='This is the first scene with a view on the complex!')\n cmd.set_view(closeup) # Get a close-up of the ligand by using the manually chosen viewpoint\n cmd.scene('002', 'store', message='This is the second scene with a close-up on the ligand!')", "def __init__(self, scene): # type: (Scene) -> None\n self.scene = scene", "def send_scene_informations(self):\n self.send_player_position()\n self.send_player_direction()\n self.send_grafik_objects()", "def vesuvio_example():\n router = Router(topo_file=PROJECT_PATH + \"vtk/Vesuvio\")\n router.routing(32729, 31991)\n # write to vtk\n router.write2vtk(router.acqueduct)\n # render_vtk(\"vtk/Vesuvio\")", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n 
obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def reference_scene(file_path, **kwargs):\n\n pass", "def output(self):\n return {\n \"action\": \"RunScene\",\n \"arguments\": [\n {\n \"name\": \"SceneNum\", \n \"value\": self.id\n }\n ], \n \"service\": \"urn:micasaverde-com:serviceId:HomeAutomationGateway1\"\n }", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def rdmb_povray_save(out_file,\n vs,\n ucs, vcs,\n width=800, height=600,\n rotx=0, roty=0, rotz=0,\n angle=14):\n\n ucmax = 6.0\n ucs = ucs / ucmax\n ucs[ucs > 1.0] = 1.0\n # ucs = ucs / np.max(ucs)\n\n rot1 = [rotx, 0, 0]\n rot2 = [0, roty, 0]\n rot3 = [0, 0, rotz]\n\n camera = Camera('location', [0, 0, -25],\n 'look_at', [0, 0, 0],\n 'angle', angle,\n 'right x*image_width/image_height')\n\n light = LightSource([-3, 2, -6], 'color', [1.0, 1.0, 1.0], 'parallel')\n light2 = LightSource([2, -2, -6], 'color', [0.6, 0.6, 0.6], 'parallel')\n background = Background('color', [1, 1, 1, 1])\n\n spheres = [Sphere(v, 0.02,\n Finish('ambient', 0.2, 'diffuse', 0.8, 'phong', 1.0),\n Texture(Pigment('color',\n [0.3+uc*0.7, 0.2+uc*0.8, 0.2+uc*0.8])),\n 'rotate', rot1,\n 'rotate', rot2,\n 'rotate', rot3) for v, uc in zip(vs, ucs)]\n\n objects = [light, light2, background] + spheres\n\n scene = Scene(camera, objects=objects)\n scene.render(out_file, width=width, height=height,\n output_alpha=True, antialiasing=0.001,\n tempfile=out_file+\"__temp__.pov\")", "def scenes_to_frames():\n # Scene 001 from frames 1-150\n cmd.scene('001', animate=0)\n cmd.mview('store', 1)\n cmd.mview('store', 150)\n # Scene 002 from frames 250-400\n cmd.scene('002', animate=0)\n cmd.mview('store', 250)\n cmd.mview('store', 400)", "def create_scene(self):\n\n c = config.Colors.background\n gr3.setbackgroundcolor(c[0], c[1], c[2], 1.0)\n gr3.clear()\n\n if self.results is None:\n return\n\n show_domains = self.settings.show_domains\n show_surface_cavities = self.settings.show_surface_cavities\n show_center_cavities = self.settings.show_center_cavities\n if show_center_cavities and self.results.center_cavities is not None:\n 
show_surface_cavities = False\n elif show_surface_cavities and self.results.surface_cavities is not None:\n show_domains = False\n\n self.objectids = [None]\n edges = self.results.atoms.volume.edges\n num_edges = len(edges)\n edge_positions = [edge[0] for edge in edges]\n edge_directions = [[edge[1][i]-edge[0][i] for i in range(3)] for edge in edges]\n edge_lengths = [sum([c*c for c in edge])**0.5 for edge in edge_directions]\n edge_radius = min(edge_lengths)/200\n if self.settings.show_bounding_box:\n gr3.drawcylindermesh(num_edges, edge_positions, edge_directions,\n [config.Colors.bounding_box]*num_edges,\n [edge_radius]*num_edges, edge_lengths)\n corners = list(set([tuple(edge[0]) for edge in edges] + [tuple(edge[1]) for edge in edges]))\n num_corners = len(corners)\n gr3.drawspheremesh(num_corners, corners,\n [config.Colors.bounding_box]*num_corners,\n [edge_radius]*num_corners)\n\n if self.settings.show_atoms and self.results.atoms is not None:\n visible_atom_indices = self.settings.visible_atom_indices\n if visible_atom_indices is not None:\n visible_atom_indices = [comp for comp in visible_atom_indices if 0 <= comp < self.results.atoms.number]\n else:\n visible_atom_indices = range(self.results.atoms.number)\n if len(visible_atom_indices) == 0:\n visible_atom_indices = None\n if visible_atom_indices is not None:\n visible_atom_indices = np.array(visible_atom_indices)\n gr3.drawspheremesh(len(visible_atom_indices),\n self.results.atoms.positions[visible_atom_indices],\n self.results.atoms.colors[visible_atom_indices],\n np.ones(len(visible_atom_indices))*config.OpenGL.atom_radius)\n if self.settings.show_bonds:\n bonds = self.results.atoms.bonds\n for start_index, target_indices in enumerate(bonds):\n if start_index not in visible_atom_indices:\n continue\n target_indices = np.array([i for i in target_indices if i in visible_atom_indices])\n if len(target_indices) == 0:\n continue\n start_position = self.results.atoms.positions[start_index]\n target_positions = self.results.atoms.positions[target_indices]\n directions = target_positions - start_position\n bond_lengths = la.norm(directions, axis=1)\n directions /= bond_lengths.reshape(len(directions), 1)\n gr3.drawcylindermesh(len(target_indices),\n target_positions,\n -directions,\n [config.Colors.bonds] * self.results.atoms.number,\n np.ones(bond_lengths.shape)*config.OpenGL.bond_radius,\n bond_lengths)\n\n if self.results is None:\n return\n if show_domains and self.results.domains is not None:\n self.draw_cavities(self.results.domains,\n config.Colors.domain, 'domain',\n self.settings.visible_domain_indices)\n if show_surface_cavities and self.results.surface_cavities is not None:\n self.draw_cavities(self.results.surface_cavities,\n config.Colors.surface_cavity, 'surface cavity',\n self.settings.visible_surface_cavity_indices)\n if show_center_cavities and self.results.center_cavities is not None:\n self.draw_cavities(self.results.center_cavities,\n config.Colors.center_cavity, 'center cavity',\n self.settings.visible_center_cavity_indices)", "def create_main_saver_node(self, version):\n fps = 25\n if version:\n project = version.task.project\n fps = project.fps\n\n random_ref_id = uuid.uuid4().hex\n\n output_format_data = [\n {\n 'name': 'jpg',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('jpg'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'jpg'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 
'OutputFormat': 'JPEGFormat',\n 'JpegFormat.Quality': 85,\n },\n 'connected_to': {\n # 'ref_id': random_ref_id\n 'Input': {\n 'type': 'ColorCurves',\n 'ref_id': random_ref_id,\n 'input_list': {\n 'EditAlpha': 0.0,\n },\n 'connected_to': {\n 'Input': {\n 'type': 'CineonLog',\n 'input_list': {\n 'Mode': 1,\n # 'RedBlackLevel': 0.0,\n # 'RedWhiteLevel': 1023.0,\n 'RedFilmStockGamma': 1.0\n },\n }\n }\n }\n }\n },\n },\n {\n 'name': 'tga',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('tga'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'tga'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'TGAFormat',\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n },\n },\n {\n 'name': 'exr',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('exr'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'exr'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 1,\n 'OutputFormat': 'OpenEXRFormat',\n 'OpenEXRFormat.Depth': 1, # 16-bit float\n 'OpenEXRFormat.RedEnable': 1,\n 'OpenEXRFormat.GreenEnable': 1,\n 'OpenEXRFormat.BlueEnable': 1,\n 'OpenEXRFormat.AlphaEnable': 1,\n 'OpenEXRFormat.ZEnable': 0,\n 'OpenEXRFormat.CovEnable': 0,\n 'OpenEXRFormat.ObjIDEnable': 0,\n 'OpenEXRFormat.MatIDEnable': 0,\n 'OpenEXRFormat.UEnable': 0,\n 'OpenEXRFormat.VEnable': 0,\n 'OpenEXRFormat.XNormEnable': 0,\n 'OpenEXRFormat.YNormEnable': 0,\n 'OpenEXRFormat.ZNormEnable': 0,\n 'OpenEXRFormat.XVelEnable': 0,\n 'OpenEXRFormat.YVelEnable': 0,\n 'OpenEXRFormat.XRevVelEnable': 0,\n 'OpenEXRFormat.YRevVelEnable': 0,\n 'OpenEXRFormat.XPosEnable': 0,\n 'OpenEXRFormat.YPosEnable': 0,\n 'OpenEXRFormat.ZPosEnable': 0,\n 'OpenEXRFormat.XDispEnable': 0,\n 'OpenEXRFormat.YDispEnable': 0,\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n {\n 'name': 'mp4',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mp4'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mp4'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'H.264_avc1',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n {\n 'name': 'mov',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mov'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mov'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'Apple ProRes 422 HQ_apch',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n\n 'QuickTimeMovies.LimitDataRate': 0.0,\n 'QuickTimeMovies.DataRateK': 1000.0,\n 'QuickTimeMovies.Advanced': 1.0,\n 'QuickTimeMovies.Primaries': 0.0,\n 'QuickTimeMovies.Transfer': 
0.0,\n 'QuickTimeMovies.Matrix': 0.0,\n 'QuickTimeMovies.PixelAspectRatio': 0.0,\n 'QuickTimeMovies.ErrorDiffusion': 1.0,\n 'QuickTimeMovies.SaveAlphaChannel': 1.0,\n\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n\n\n\n },\n 'connected_to': {\n 'ref_id': random_ref_id\n }\n }\n },\n ]\n\n if version.task.type and version.task.type.name == 'Plate':\n # create a different type of outputs\n output_format_data = [\n {\n 'name': 'jpg',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('jpg'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'jpg'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'JPEGFormat',\n 'JpegFormat.Quality': 85,\n },\n },\n },\n {\n 'name': 'exr',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('exr'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'exr'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'OpenEXRFormat',\n 'OpenEXRFormat.Depth': 1, # 16-bit float\n 'OpenEXRFormat.RedEnable': 1,\n 'OpenEXRFormat.GreenEnable': 1,\n 'OpenEXRFormat.BlueEnable': 1,\n 'OpenEXRFormat.AlphaEnable': 0,\n 'OpenEXRFormat.ZEnable': 0,\n 'OpenEXRFormat.CovEnable': 0,\n 'OpenEXRFormat.ObjIDEnable': 0,\n 'OpenEXRFormat.MatIDEnable': 0,\n 'OpenEXRFormat.UEnable': 0,\n 'OpenEXRFormat.VEnable': 0,\n 'OpenEXRFormat.XNormEnable': 0,\n 'OpenEXRFormat.YNormEnable': 0,\n 'OpenEXRFormat.ZNormEnable': 0,\n 'OpenEXRFormat.XVelEnable': 0,\n 'OpenEXRFormat.YVelEnable': 0,\n 'OpenEXRFormat.XRevVelEnable': 0,\n 'OpenEXRFormat.YRevVelEnable': 0,\n 'OpenEXRFormat.XPosEnable': 0,\n 'OpenEXRFormat.YPosEnable': 0,\n 'OpenEXRFormat.ZPosEnable': 0,\n 'OpenEXRFormat.XDispEnable': 0,\n 'OpenEXRFormat.YDispEnable': 0,\n },\n },\n },\n {\n 'name': 'mp4',\n 'node_tree': {\n 'type': 'Saver',\n 'attr': {\n 'TOOLS_Name': self.output_node_name_generator('mp4'),\n },\n 'input_list': {\n 'Clip': self.output_path_generator(version, 'mp4'),\n 'CreateDir': 1,\n 'ProcessRed': 1,\n 'ProcessGreen': 1,\n 'ProcessBlue': 1,\n 'ProcessAlpha': 0,\n 'OutputFormat': 'QuickTimeMovies',\n 'ProcessMode': 'Auto',\n 'SaveFrames': 'Full',\n 'QuickTimeMovies.Compression': 'H.264_avc1',\n 'QuickTimeMovies.Quality': 95.0,\n 'QuickTimeMovies.FrameRateFps': fps,\n 'QuickTimeMovies.KeyFrames': 5,\n 'StartRenderScript': 'frames_at_once = comp:GetPrefs(\"Comp.Memory.FramesAtOnce\")\\ncomp:SetPrefs(\"Comp.Memory.FramesAtOnce\", 1)',\n 'EndRenderScript': 'comp:SetPrefs(\"Comp.Memory.FramesAtOnce\", frames_at_once)',\n },\n },\n },\n ]\n\n # selectively generate output format\n saver_nodes = self.get_main_saver_node()\n\n for data in output_format_data:\n format_name = data['name']\n node_tree = data['node_tree']\n\n # now check if a node with the same name exists\n format_node = None\n format_node_name = self.output_node_name_generator(format_name)\n for node in saver_nodes:\n node_name = node.GetAttrs('TOOLS_Name')\n if node_name.startswith(format_node_name):\n format_node = node\n break\n\n # create the saver node for this format if missing\n if not format_node:\n self.create_node_tree(node_tree)\n else:\n # just update the input_lists\n if 'input_list' in node_tree:\n input_list = 
node_tree['input_list']\n for key in input_list:\n node_input_list = format_node.GetInputList()\n for input_entry_key in node_input_list.keys():\n input_entry = node_input_list[input_entry_key]\n input_id = input_entry.GetAttrs()['INPS_ID']\n if input_id == key:\n value = input_list[key]\n input_entry[0] = value\n break\n\n try:\n os.makedirs(\n os.path.dirname(\n self.output_path_generator(version, format_name)\n )\n )\n except OSError:\n # path already exists\n pass", "def onExport( self ):\n self.__exportButton.text = \"Working...\"\n slicer.app.processEvents()\n\n outputDir = os.path.abspath( self.__dirButton.directory )\n outputFile = os.path.join( outputDir, 'index.html' )\n\n try:\n output = self.logic.export( self.__captionCombobox.currentIndex, self.__copyCheckbox.checked, outputDir )\n except Exception as e:\n # maybe the scene was not saved?\n qt.QMessageBox.warning( None, 'Error', 'Please make sure the scene was saved before attempting to export to WebGL!' )\n self.__exportButton.text = \"Export to WebGL\"\n return\n\n if self.__serverCheckbox.checked and useWebserver:\n # start server\n os.chdir( outputDir )\n\n # if we have already a httpd running, kill it now\n # it will likely leave an orphaned process but since we mark it killed,\n # slicer will destroy it on exit\n if self.__httpd:\n self.__p.terminate()\n # increase the port\n self.__port += 1\n\n # check first if the port is available (since we open it as a new process we can not check later)\n portFree = False\n while not portFree:\n try:\n s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n s.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )\n s.bind( ( \"\", self.__port ) )\n except socket.error, e:\n portFree = False\n self.__port += 1\n finally:\n s.close()\n portFree = True\n\n # we need to break out of the pythonQt context here to make multiprocessing work\n import sys\n sys.stdin = sys.__stdin__\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n self.__handler = SimpleHTTPServer.SimpleHTTPRequestHandler\n self.__httpd = SocketServer.TCPServer( ( \"\", self.__port ), self.__handler )\n self.__p = m.Process( target=self.__httpd.serve_forever )\n self.__p.start()\n\n url = 'http://localhost:' + str( self.__port ) + '/index.html'\n else:\n # no server\n url = outputFile\n\n with open( outputFile, 'w' ) as f:\n f.write( output )\n\n self.__exportButton.text = \"Export to WebGL\"\n\n if self.__viewCheckbox.checked:\n time.sleep( 1 )\n webbrowser.open_new_tab( url )", "def deleteAllModelsFromScene(self):\n #productive #onButton\n profprint()\n while slicer.util.getNodes('python-catch-round_*') != {}:\n nodes = slicer.util.getNodes('python-catch-round_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('manual-seg_*') != {}:\n nodes = slicer.util.getNodes('manual-seg_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('obturator-seg_*') != {}:\n nodes = slicer.util.getNodes('obturator-seg_*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n #while slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode') !={}:\n # nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode')\n # for node in nodes.values():\n # slicer.mrmlScene.RemoveNode(node)\n while slicer.util.getNodes('template slice position*') != {}:\n nodes = slicer.util.getNodes('template slice position*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n sYellow = 
slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\n if sYellow ==None :\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\n sYellow.SetSliceVisible(0)\n reformatLogic = slicer.vtkSlicerReformatLogic()\n reformatLogic.SetSliceNormal(sYellow,1,0,0)\n tempFidNodes = slicer.mrmlScene.GetNodesByName('Temp')\n for i in range(tempFidNodes.GetNumberOfItems()):\n node = tempFidNodes.GetItemAsObject(i)\n if node:\n slicer.mrmlScene.RemoveNode(node)\n sYellow.Modified()", "def script(self):", "def exportLights(self):\n\t\t#TODO! REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\t\tif len(finalLi) == 1:\n\t\t\t\tlitsToExport.append( finalLi[0] )\n\t\t\telse:\n\t\t\t\tlitsToExport.append( finalLi[1] )\n\t\tif litsToExport:\n\t\t\tmc.select( litsToExport, r=1, ne=1 )\n\t\t\tmc.file( self.lightPath.path, op=\"v=0\", typ=\"mayaAscii\", pr=1, es=1 )\n\t\t\t#export Light Linking\n\t\t\tself.exportLightLinking()", "def main():\r\n \r\n world = WorldModel()\r\n #uncomment these lines and comment out the next 2 if you want to use the\r\n #full Baxter model\r\n #print \"Loading full Baxter model (be patient, this will take a minute)...\"\r\n #world.loadElement(os.path.join(model_dir,\"baxter.rob\"))\r\n print \"Loading simplified Baxter model...\"\r\n world.loadElement(os.path.join(model_dir,\"baxter_col.rob\"))\r\n print \"Loading Kiva pod model...\"\r\n world.loadElement(os.path.join(model_dir,\"kiva_pod/model.obj\"))\r\n print \"Loading plane model...\"\r\n world.loadElement(os.path.join(model_dir,\"plane.env\"))\r\n \r\n #shift the Baxter up a bit (95cm)\r\n Rbase,tbase = world.robot(0).getLink(0).getParentTransform()\r\n world.robot(0).getLink(0).setParentTransform(Rbase,(0,0,0.95))\r\n \r\n #translate pod to be in front of the robot, and rotate the pod by 90 degrees \r\n Trel = (so3.rotation((0,0,1),math.pi/2),[1.1,0,0])\r\n T = world.rigidObject(0).getTransform()\r\n world.rigidObject(0).setTransform(*se3.mul(Trel,T))\r\n \r\n #run the visualizer\r\n visualizer = MyGLViewer(world)\r\n visualizer.run()", "def export(self, savepath):\n logger.debug(f\"Exporting scene to {savepath}\")\n _backend = self.backend\n\n if not self.is_rendered:\n self.render(interactive=False)\n\n path = Path(savepath)\n if path.suffix != \".html\":\n raise ValueError(\"Savepath should point to a .html file\")\n\n # prepare settings\n vsettings.notebookBackend = \"k3d\"\n\n # Create new plotter and save to file\n plt = Plotter()\n plt.add(self.clean_renderables, render=False)\n plt = plt.show(interactive=False)\n plt.camera[-2] = -1\n\n with open(path, \"w\") as fp:\n fp.write(plt.get_snapshot())\n\n print(\n f\"The brainrender scene has been exported for web. The results are saved at {path}\"\n )\n\n # Reset settings\n vsettings.notebookBackend = None\n self.backend = _backend\n\n return str(path)", "def export_onnx(self, export_path) -> None:\n EVAL_MAX_CLICKS = self.net_clicks_limit\n POINT_LENGTH = EVAL_MAX_CLICKS * 2\n HEIGHT, WIDTH = self.input_size\n\n # NOTE: dim=0: orig_img + flip_img = 2\n _image = torch.randn(2, 3, HEIGHT, WIDTH,\n device=self.device,\n dtype=torch.float32)\n _points = torch.ones(2, POINT_LENGTH, 2,\n device=self.device,\n dtype=torch.int32)\n\n # Providing input and output names sets the display names for values\n # within the model's graph. 
Setting these does not change the semantics\n # of the graph; it is only for readability.\n #\n # The inputs to the network consist of the flat list of inputs (i.e.\n # the values you would pass to the forward() method) followed by the\n # flat list of parameters. You can partially specify names, i.e. provide\n # a list here shorter than the number of inputs to the model, and we will\n # only set that subset of names, starting from the beginning.\n input_names = [ \"image\" ] + [ \"points\" ]\n output_names = [ \"output\"]\n\n # NOTE: Dynamic Axes make input dimension dynamic.\n dynamic_axes = {'points': {1: 'num_pts'}}\n\n # NOTE: Paramters Explanation\n # * args: input arguments. Wrap multiple inputs as tuple.\n # * f: path where the ONNX model is exported.\n # * do_constant_folding: enable constant-folding optimization\n # * input_names: setup input names as a list of string\n # * output_names: setup output names as a list of string\n # * opset_version: opset version of ONNX model. Latest one is recommended.\n # * operator_export_type:\n # * OperatorExportTypes.ONNX: normal mode\n # * OperatorExportTypes.ONNX_ATEN_FALLBACK: check 'ATen' node in debug mode\n # * dynamic_axes: define dynamic dimension inputs\n torch.onnx.export(self.net,\n args=(_image, _points),\n f=export_path,\n export_params=True,\n do_constant_folding=True,\n verbose=True,\n input_names=input_names,\n output_names=output_names,\n opset_version=12,\n operator_export_type=torch.onnx.OperatorExportTypes.ONNX,\n dynamic_axes=dynamic_axes)", "def create_scene(self, ):\n self.scene = create_scene(\n self.opt.splats_img_size, self.opt.splats_img_size, self.opt.fovy,\n self.opt.focal_length, self.opt.n_splats)", "def write_viman(self, component_instance):\n # Get the id of the poster already created\n poster_id = self._poster_dict[component_instance.blender_obj.name]\n parent = component_instance.robot_parent\n\n scene = bge.logic.getCurrentScene()\n \n seen_objects = [obj['name'] for obj in component_instance.local_data['visible_objects']]\n\n i = 0\n for object_id in self.scene_object_list:\n\n try:\n t = time.time()\n tacq_sec = int(t)\n tacq_usec = int((t - tacq_sec) * 1000)\n ors_viman_poster.set_tacq(self.viman_data, i, tacq_sec, tacq_usec)\n \n if object_id in seen_objects:\n\n object = passive_objects.obj_from_label(object_id)\n\n position_3d = Transformation3d(object)\n logger.debug(\"VIMAN \" + object_id + \"(\" + object.name + \") is visible at \" + str(position_3d))\n ors_viman_poster.set_visible(self.viman_data, i, 1)\n _fill_world_matrix(self.viman_data, position_3d, i)\n _fill_robot_matrix(self.viman_data, parent, position_3d, i)\n else:\n ors_viman_poster.set_visible (self.viman_data, i, 0)\n \n \n # Write to the poster with the data for all objects\n posted = ors_viman_poster.real_post_viman_poster(poster_id, self.viman_data)\n except KeyError as detail:\n logger.debug(\"WARNING: Object %s not found in the scene\" % detail)\n pass\n posted = False\n\n i = i + 1", "def save_and_reload_scene():\n\n flg = logging.getLogger(\"lettuce.xgenSetup.save_and_reload_scene\")\n\n current_file = mc.file(save=True)\n flg.info(\"Current File: {}\".format(current_file))\n mc.file(current_file, ignoreVersion=True, open=True, force=True)", "def show_visuals(self, objects_in_scene, image, axe_pred):\n image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)\n\n # draw grid (slow)\n #image = self.draw_grid(image)\n\n # add axe bounding box\n #image = self.return_bbox_image(image, objects_in_scene.axes, \"Axe\", AXE_COLOR)\n\n # 
add mundo bounding box\n #image = self.return_bbox_image(image, objects_in_scene.mundos, \"Mundo\", MUNDO_COLOR)\n\n # add a circle/dot at the centre of the axe bbox\n image = self.show_centre_of_bbox(image, objects_in_scene.axes)\n\n # if there is a prediction made in the current frame, draw an arrow graphic to highlight\n # where the program predicts the axe will go\n if axe_pred:\n image = self.draw_pred_arrows(image, axe_pred, 1)\n\n\n\n\n # open live capture window with new shapes\n try:\n image = cv2.resize(image, (960, 540)) \n cv2.imshow(\"visualisation\", image)\n\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n exit()\n\n except:\n pass", "def __publish_obj(self, item, output, work_template, primary_publish_path,\n sg_task, comment, thumbnail_path, progress_cb):\n # determine the publish info to use\n #\n progress_cb(10, \"Determining publish details\")\n\n # get the current scene path and extract fields from it\n # using the work template:\n scene_path = os.path.abspath(cmds.file(query=True, sn=True))\n fields = work_template.get_fields(scene_path)\n publish_version = fields[\"version\"]\n tank_type = output[\"tank_type\"]\n\n # create the publish path by applying the fields\n # with the publish template:\n publish_template = output[\"publish_template\"]\n publish_path = publish_template.apply_fields(fields)\n\n # ensure the publish folder exists:\n publish_folder = os.path.dirname(publish_path)\n self.parent.ensure_folder_exists(publish_folder)\n\n # determine the publish name:\n publish_name = fields.get(\"name\")\n if not publish_name:\n publish_name = os.path.basename(publish_path)\n\n # Find additional info from the scene:\n #\n progress_cb(20, \"Analysing scene\")\n\n # build the export command.\n obj_export_cmd = \"file -force -es -pr -typ \\\"OBJexport\\\"\"\n obj_export_cmd += \" -options \\\"groups=1;ptgroups=1;materials=0;smoothing=1;normals=1\\\"\"\n obj_export_cmd += \" \\\"%s\\\"\" % (publish_path.replace(\"\\\\\", \"/\"))\n\n # ...and execute it:\n progress_cb(30, \"Exporting OBJ file\")\n try:\n self.parent.log_debug(\"Executing command: %s\" % obj_export_cmd)\n\n # make sure plugin is loaded\n if not cmds.pluginInfo('objExport',query=True,loaded=True):\n cmds.loadPlugin('objExport')\n\n # clear selection, select what's in the set\n sel = cmds.ls(sl=True)\n set_contents = cmds.sets('publish_SET',q=True)\n cmds.select(clear=True)\n for obj in set_contents:\n cmds.select(obj,add=True)\n\n # do the actual export\n mel.eval(obj_export_cmd)\n\n # then restore the selection\n cmds.select(clear=True)\n for obj in sel:\n cmds.select(obj,add=True)\n\n except Exception, e:\n raise TankError(\"Failed to export OBJ file: %s\" % e)\n\n # register the publish:\n progress_cb(75, \"Registering the publish\")\n args = {\n \"tk\": self.parent.tank,\n \"context\": self.parent.context,\n \"comment\": comment,\n \"path\": publish_path,\n \"name\": publish_name,\n \"version_number\": publish_version,\n \"thumbnail_path\": thumbnail_path,\n \"task\": sg_task,\n \"dependency_paths\": [primary_publish_path],\n \"published_file_type\":tank_type\n }\n tank.util.register_publish(**args)", "def make_main(self):\n\t\tself.scene.camera = self.main_camera", "def visualize(self):\n self.octree.updateInnerOccupancy()\n print(\"Start Octomap Visualization\")\n\n # define parameters\n data = imgviz.data.arc2017()\n camera_info = data['camera_info']\n K = np.array(camera_info['K']).reshape(3, 3)\n width=camera_info['width']\n height=camera_info['height']\n\n # get free and 
occupied grid\n occupied, _ = self.octree.extractPointCloud()\n #frontier = self.gen_frontier()\n \n print(\"load point cloud\")\n window = pyglet.window.Window(\n width=int(1280), height=int(960)\n )\n\n @window.event\n def on_key_press(symbol, modifiers):\n if modifiers == 0:\n if symbol == pyglet.window.key.Q:\n window.on_close()\n\n gui = glooey.Gui(window)\n hbox = glooey.HBox()\n hbox.set_padding(5)\n\n camera = trimesh.scene.Camera(\n resolution=(width, height), focal=(K[0, 0], K[1, 1])\n )\n\n # initial camera pose\n camera_transform = np.array(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, -5],\n [0.0, 0.0, 0.0, 1.0],\n ],\n )\n\n \n\n occupied_geom = trimesh.voxel.ops.multibox(\n occupied, pitch=self.resolution, colors=[0.0, 0.0, 0.0, 0.5]\n )\n\n # frontier_geom = trimesh.voxel.ops.multibox(\n # frontier, pitch=self.resolution, colors=[1.0, 0, 0, 0.5]\n # )\n scene = trimesh.Scene(camera=camera, geometry=[occupied_geom])#, frontier_geom])\n scene.camera_transform = camera_transform\n hbox.add(self.labeled_scene_widget(scene, label='octomap'))\n\n\n gui.add(hbox)\n pyglet.app.run()", "def save(self, _name):\r\n try:\r\n with open(_name, 'w+') as fout:\r\n fout.write(\".cube file generated from prt_esolv.py\\n\")\r\n fout.write(f\"{_name}\\n\")\r\n\r\n fout.write(\r\n f\"{int(self.n_atoms)} {float(self.origin[0])} {float(self.origin[1])} {float(self.origin[2])}\\n\")\r\n\r\n fout.write(f\"{int(self.n_x)} {float(self.x[0])} {float(self.x[1])} {float(self.x[2])}\\n\")\r\n fout.write(f\"{int(self.n_y)} {float(self.y[0])} {float(self.y[1])} {float(self.y[2])}\\n\")\r\n fout.write(f\"{int(self.n_z)} {float(self.z[0])} {float(self.z[1])} {float(self.z[2])}\\n\")\r\n\r\n for atom, xyz in zip(self.atoms, self.atoms_xyz):\r\n fout.write(f\"{atom} 0 {xyz[0]} {xyz[1]} {xyz[2]}\\n\")\r\n\r\n for ix in range(self.n_x):\r\n for iy in range(self.n_y):\r\n for iz in range(self.n_z):\r\n fout.write(f\"{self.data[ix][iy][iz]}\")\r\n if iz % 6 == 5:\r\n fout.write('\\n')\r\n fout.write(\"\\n\")\r\n except IOError:\r\n print(f\"Can't create {_name} file!!!\")\r\n raise\r\n\r\n return None", "def __init__(self):\r\n self.label = \"OVL to Feature\"\r\n self.description = \"OVL to Feature converts an OVL file from CPOF, C2PC, GCCS or similar system and converts it to a series of Feature Class for Point, Line, and Polygons.\"\r\n self.canRunInBackground = False", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def export(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net') #+ name)\n export_path = os.path.join(self.configuration['export_path'], 'exported_net_{}.pth'.format(name))\n batch_fixed = self.input[:,1,:,:,:]\n batch_moving = self.input[:,2,:,:,:]\n traced_script_module = torch.jit.trace(net, (batch_moving, batch_fixed))\n traced_script_module.save(export_path)", "def on_exit(self, next_scene):", "def writeObject(self,view,renderer):\n\n if not view.Source:\n return \"\"\n\n # point light hook\n proxy = getattr(view.Source,\"Proxy\",None)\n if getattr(proxy,\"type\",None) == \"PointLight\":\n return self.writePointLight(view,renderer)\n\n # get color and alpha\n mat = None\n color = None\n alpha = None\n if view.Material:\n mat = view.Material\n else:\n if \"Material\" in view.Source.PropertiesList:\n if view.Source.Material:\n mat = view.Source.Material\n if mat:\n if \"Material\" in 
mat.PropertiesList:\n if \"DiffuseColor\" in mat.Material:\n color = mat.Material[\"DiffuseColor\"].strip(\"(\").strip(\")\").split(\",\")[:3]\n if \"Transparency\" in mat.Material:\n if float(mat.Material[\"Transparency\"]) > 0:\n alpha = 1.0 - float(mat.Material[\"Transparency\"])\n else:\n alpha = 1.0\n\n if view.Source.ViewObject:\n if not color:\n if hasattr(view.Source.ViewObject,\"ShapeColor\"):\n color = view.Source.ViewObject.ShapeColor[:3]\n if not alpha:\n if hasattr(view.Source.ViewObject,\"Transparency\"):\n if view.Source.ViewObject.Transparency > 0:\n alpha = 1.0-(float(view.Source.ViewObject.Transparency)/100.0)\n if not color:\n color = (1.0, 1.0, 1.0)\n if not alpha:\n alpha = 1.0\n\n # get mesh\n mesh = None\n if hasattr(view.Source,\"Group\"):\n shps = [o.Shape for o in Draft.getGroupContents(view.Source) if hasattr(o,\"Shape\")]\n mesh = MeshPart.meshFromShape(Shape=Part.makeCompound(shps),\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Part::Feature\"):\n mesh = MeshPart.meshFromShape(Shape=view.Source.Shape,\n LinearDeflection=0.1,\n AngularDeflection=0.523599,\n Relative=False)\n elif view.Source.isDerivedFrom(\"Mesh::Feature\"):\n mesh = view.Source.Mesh\n if not mesh:\n return \"\"\n\n return renderer.writeObject(view,mesh,color,alpha)", "def on_action_4_triggered(self):\n # TODO: not implemented yet\n model = self.model2\n self.doExport(model)\n #raise NotImplementedError", "def new_scene(force=True, **kwargs):\n\n pass", "def run_app(self):\r\n ## Tell the artist to be patient... eg not genY\r\n inprogressBar = pbui.ProgressBarUI(title = 'Rebuilding Surfacing Scene From Publish:')\r\n inprogressBar.show()\r\n inprogressBar.updateProgress(percent = 1, doingWhat = 'Processing scene info...')\r\n ## Instantiate the API\r\n tk = sgtk.sgtk_from_path(\"T:/software/bubblebathbay\")\r\n debug(app = self, method = 'run_app', message = 'API instanced...\\n%s' % tk, verbose = False)\r\n debug(app = self, method = 'run_app', message = 'Fetch Surface Shaders launched...', verbose = False)\r\n \r\n context = self.context ## To get the step\r\n debug(app = self, method = 'run_app', message = 'Context Step...\\n%s' % context.step['name'], verbose = False)\r\n if context.step['name'] != 'Surface':\r\n cmds.warning(\"Current context is not a valid Surfacing context. Please make sure you are under a valid shotgun Surfacing context!\")\r\n QtGui.QMessageBox.information(None, \"Current context is not a valid Surfacing context. Please make sure you are under a valid shotgun Surfacing context!\")\r\n raise tank.TankError(\"Current context is not a valid Surfacing context. Please make sure you are under a valid shotgun Surfacing context!\")\r\n \r\n scene_path = '%s' % os.path.abspath(cmds.file(query=True, sn= True))\r\n debug(app = self, method = 'run_app', message = 'scene_path... %s' % scene_path, verbose = False)\r\n \r\n ## Build an entity type to get some values from.\r\n entity = self.context.entity ## returns {'type': 'Shot', 'name': 'ep100_sh010', 'id': 1166}\r\n debug(app = self, method = 'run_app', message = 'entity... %s' % entity, verbose = False)\r\n \r\n ## Filter for the matching ID for the shot\r\n sg_filters = [[\"id\", \"is\", entity[\"id\"]]]\r\n debug(app = self, method = 'run_app', message = 'sg_filters... 
%s' % sg_filters, verbose = False)\r\n \r\n ## Build an entity type to get some values from.\r\n sg_entity_type = self.context.entity[\"type\"] ## returns Shot\r\n debug(app = self, method = 'run_app', message = 'sg_entity_type...\\n%s' % sg_entity_type, verbose = False)\r\n \r\n ## DATA\r\n ## NOTES SO HERE WE DON'T NEED TO CALL THE ASSETS FIELD FROM SHOTGUN\r\n ## WE CAN JUST GRAB THE LATEST PUBLISH FILE FROM EACH OF THE TEMPLATE STEPS\r\n inprogressBar.updateProgress(percent = 3, doingWhat = 'Processing scene info...')\r\n shadersTemplate = tk.templates[self.get_setting('maya_asset_SHD_XML_template')]\r\n debug(app = self, method = 'run_app', message = 'shadersTemplate...\\n%s' % shadersTemplate, verbose = False)\r\n\r\n ## PROCESS TEMPLATE NOW\r\n inprogressBar.updateProgress(percent = 5, doingWhat = 'Processing shaders xml...') \r\n debug(app = self, method = 'run_app', message = 'Processing template... %s' % shadersTemplate, verbose = False)\r\n ## SHADERS\r\n self.processTemplates(tk = tk, templateFile = shadersTemplate, id = entity[\"id\"], shotNum = entity[\"name\"], inprogressBar = inprogressBar, lighting = False)\r\n \r\n ############################################\r\n ## CORE ACHIVES \r\n ## Now process the assembly References\r\n debug(app = self, method = 'run_app', message = 'Processing mentalCore assemblies..', verbose = False)\r\n inprogressBar.updateProgress(percent = 50, doingWhat = 'Processing core archives...')\r\n if cmds.objExists('CORE_ARCHIVES_hrc') or cmds.objExists('CORE_ARCHIVES_hrc'):\r\n inprogressBar.updateProgress(percent = 100, doingWhat = 'Complete...')\r\n inprogressBar.close()\r\n inprogressBar = None\r\n else:\r\n ## Get the assembly paths from the transforms in the scene with the correct tags to load now..\r\n self.getAssemblyPaths = coreLib.getCorePaths()\r\n debug(app = self, method = 'run_app', message = 'self.getAssemblyPaths.. 
%s' % self.getAssemblyPaths, verbose = False)\r\n \r\n ## Now load the assemblies from the paths\r\n coreLib.loadCoreArchives(paths = self.getAssemblyPaths)\r\n debug(app = self, method = 'run_app', message = 'self.loadCoreArchives Successful all assemblies loaded moving on to reconnect now...', verbose = False)\r\n inprogressBar.updateProgress(percent = 70, doingWhat = 'Core archives loaded...')\r\n \r\n ## Now connect the assemblies.\r\n inprogressBar.updateProgress(percent = 80, doingWhat = 'Reconnecting core archives...')\r\n coreLib.doReconnect(postPublish = False)\r\n debug(app = self, method = 'run_app', message = 'Ahh core archive assemblies reconnected successfully!!...', verbose = False)\r\n \r\n ## Now cleanup\r\n inprogressBar.updateProgress(percent = 90, doingWhat = 'Cleaning...')\r\n ## Group the placements\r\n cleanup.shotCleanupPlacements() \r\n ## Group the lights\r\n cleanup.shotCleanupLights()\r\n ## Put all the coreRebuild under Lighting_hrc group\r\n coreLib._cleanupCoreArchiveRebuildGrps('LIGHTING_hrc')\r\n \r\n \r\n inprogressBar.updateProgress(percent = 100, doingWhat = 'COMPLETE...')\r\n inprogressBar.close()\r\n inprogressBar = None", "def save_model(self, export_path, save_ae=True):\n net_dict = self.net.state_dict()\n ae_net_dict = self.ae_net.state_dict() if save_ae else None\n #ae_threshold = self.scores_threhold_rec if save_ae else None\n\n torch.save({'c': self.c,\n 'net_dict': net_dict,\n 'ae_net_dict': ae_net_dict}, export_path)", "def saveViews(lib, filename='views', path=os.path.expanduser('~')):\n ext = '.camera'\n os.chdir(path)\n f = open(filename + ext, 'wb')\n pickle.dump(lib, f, pickle.HIGHEST_PROTOCOL)\n f.close()", "def importBaseScene(self):\n logger.debug(\"Func: importBaseScene\")\n relSceneFile = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"RelativePath\"]\n absSceneFile = os.path.join(self.projectDir, relSceneFile)\n if os.path.isfile(absSceneFile):\n # cmds.file(absSceneFile, i=True)\n nuke.nodePaste(absSceneFile)\n return 0\n else:\n msg = \"File in Scene Manager database doesnt exist\"\n self._exception(210, msg)\n return -1, msg", "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, \"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, 
\"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. 
Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def main():\n obj = VplexStorageview()\n obj.perform_module_operation()", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def loadMultiple(method, *args):\n\n ### Declaring attributes\n selectedCurve = selectedMesh = None\n minRangeX = minRangeY = minRangeZ = maxRangeX = maxRangeY = maxRangeZ = 0\n selectedObjects = []\n\n ### Query UI values\n # Choise between standin / assembly\n selectedRadio = cmds.radioCollection(loadMethodRadio, query=True, select=True)\n # List of all asset icons on UI\n objectIconsList = cmds.layout(objectScroll, query=True, childArray=True)\n # Amount of copies\n buildingAmount = cmds.intSliderGrp(SpawnObjectsTab.BuildingAmount, query=True, value=True)\n # Deviation from original rotation\n rotationVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomRotation, query=True, value=True)\n # Deviation from original scale\n scaleVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomScale, query=True, value=True)\n\n ### Iterate over each asset icon\n for obj in objectIconsList:\n\n # Append to list if the asset is selected\n isSelected = cmds.iconTextCheckBox(obj, query=True, value=True)\n\n if isSelected:\n selectedObjects.append(cmds.iconTextCheckBox(obj, query=True, label=True))\n\n # Exit the function if no asset is selected\n if not selectedObjects:\n return\n \n # Reference to the function that will scatter the copies\n scatteringFunction = None\n\n ### The user chose \"curve\"\n if method == \"curve\":\n \n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnCurve\n\n # Get curve reference\n selectedCurve = cmds.ls(selection=True)\n if not selectedCurve:\n return\n selectedCurve = selectedCurve[0]\n\n ### The user chose \"range\"\n if method == \"range\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnRange\n\n # Query minimum values from floatField\n minValues = cmds.floatFieldGrp(SpawnObjectsTab.MinimumField, query=True, value=True)\n minRangeX, minRangeY, minRangeZ = minValues[0], minValues[1], minValues[2]\n # Query maximum values from floatField\n maxValues = 
cmds.floatFieldGrp(SpawnObjectsTab.MaximumField, query=True, value=True)\n maxRangeX, maxRangeY, maxRangeZ = maxValues[0], maxValues[1], maxValues[2]\n\n ### The user chose \"mesh\"\n if method == \"mesh\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnMesh\n\n # Get reference of selected object\n selectedMesh = cmds.ls(selection=True)\n if not selectedMesh:\n return\n selectedMesh = selectedMesh[0]\n\n # Create group for the spawned copies \n finalGroup = cmds.group(name=\"CurveAssetGroup\", empty=True)\n cmds.select(clear=True)\n\n ### Iterate over the generated positions of the function with given parameters\n # scatteringFunction is a reference to a function in ObjectScattering.py\n # these functions are generators, they yield a value and we can iterate\n # to get the next value generated.\n for position in scatteringFunction(objectCount=buildingAmount, curve=selectedCurve,\n minX=minRangeX, minY=minRangeY, minZ=minRangeZ, maxX=maxRangeX, maxY=maxRangeY, maxZ=maxRangeZ,\n mesh=selectedMesh):\n \n # Randomly instance an asset from the selectedObjects list\n asset = AssetIcon(random.choice(selectedObjects))\n loadedAssetNode = None\n\n # Create copy based on the mode selected by the user\n if \"standin\" in selectedRadio:\n loadedAssetNode = asset.loadArnoldAsset()\n else: \n loadedAssetNode = asset.loadAsset()\n\n # Move this copy to the generated position\n cmds.move(position[0], position[1], position[2], loadedAssetNode, absolute=True)\n\n # If there is a fourth index on the position, that means we have rotation info\n # use that info to rotate the asset.\n # It is used to match an objects rotation to a face normal.\n if len(position) == 4:\n cmds.rotate(position[3][0], position[3][1], position[3][2], loadedAssetNode, absolute=True)\n \n # Add random rotation\n angle = random.uniform(-rotationVariation, rotationVariation)\n cmds.rotate(angle, loadedAssetNode, y=True, relative=True, objectSpace=True)\n\n # Add random scale\n newScale = random.uniform(1, 1+scaleVariation)\n cmds.scale(newScale, newScale, newScale, loadedAssetNode, absolute=True)\n\n #cmds.FreezeTransformations(loadedAssetNode)\n\n # Parent copy to group\n cmds.parent(loadedAssetNode, finalGroup)", "def export(self, buffer: IO[str], ind: str = '') -> None:\n buffer.write(ind + 'camera\\n')\n buffer.write(ind + '{\\n')\n buffer.write(f'{ind}\\t\"position\" \"[{self.pos}]\"\\n')\n buffer.write(f'{ind}\\t\"look\" \"[{self.target}]\"\\n')\n buffer.write(ind + '}\\n')", "def export3DModel(self, fileName, filePath, fileFormat=\".step\", object_list=[], removed_objects=[]):\n if not object_list:\n allObjects = self.modeler.primitives.object_names\n if removed_objects:\n for rem in removed_objects:\n allObjects.remove(rem)\n else:\n if \"Region\" in allObjects:\n allObjects.remove(\"Region\")\n else:\n allObjects = object_list[:]\n\n self.add_info_message(\"Exporting {} objects\".format(len(allObjects)))\n\n stringa = \",\".join(allObjects)\n arg = [\n \"NAME:ExportParameters\",\n \"AllowRegionDependentPartSelectionForPMLCreation:=\",\n True,\n \"AllowRegionSelectionForPMLCreation:=\",\n True,\n \"Selections:=\",\n stringa,\n \"File Name:=\",\n str(filePath) + \"/\" + str(fileName) + str(fileFormat),\n \"Major Version:=\",\n -1,\n \"Minor Version:=\",\n -1,\n ]\n\n self.modeler.oeditor.Export(arg)\n return True", "def roi_cli():", "def export_alembic(self, path, geo_nodes, use_local_space=False):\n if os.path.exists(path):\n raise RuntimeError('Given path aleady exist: 
{}'.format(path))\n\n export_space = '' if use_local_space else '-worldSpace'\n args = [\n '-uv',\n export_space,\n '-frameRange', str(self._model.frame_in - 1),\n str(self._model.frame_out + 1),\n '-frameRelativeSample', str(self._model.motion_blur_in),\n '-frameRelativeSample', '0',\n '-frameRelativeSample', str(self._model.motion_blur_out),\n '-file', path,\n ]\n for node in geo_nodes:\n if mc.nodeType(node) != 'transform':\n node = mc.listRelatives(node, parent=True, fullPath=True)[0]\n args.extend([\n '-root', node\n ])\n\n mc.AbcExport(jobArg=[' '.join(args)])", "def save_env():\n global vis\n vis.save([vis.env])", "def __init__(self, parent):\n super(P5, self).__init__(parent)\n self.shapes = []\n self.scenes = []\n self.current_scene = 0\n self.objects = []\n self.lighting = True\n self.draw_axes = True", "def on_action_2_triggered(self):\n # TODO: not implemented yet\n model = self.model\n self.doExport(model)", "def export_world(file, world, scene, global_matrix, tab_write):\n render = scene.pov\n agnosticrender = scene.render\n camera = scene.camera\n # matrix = global_matrix @ camera.matrix_world # view dependant for later use NOT USED\n if not world:\n return\n\n # These lines added to get sky gradient (visible with PNG output)\n\n # For simple flat background:\n if not world.pov.use_sky_blend:\n # No alpha with Sky option:\n if render.alpha_mode == \"SKY\" and not agnosticrender.film_transparent:\n tab_write(\n file, \"background {rgbt<%.3g, %.3g, %.3g, 0>}\\n\" % (world.pov.horizon_color[:])\n )\n\n elif render.alpha_mode == \"STRAIGHT\" or agnosticrender.film_transparent:\n tab_write(\n file, \"background {rgbt<%.3g, %.3g, %.3g, 1>}\\n\" % (world.pov.horizon_color[:])\n )\n else:\n # Non fully transparent background could premultiply alpha and avoid\n # anti-aliasing display issue\n tab_write(\n file,\n \"background {rgbft<%.3g, %.3g, %.3g, %.3g, 0>}\\n\"\n % (\n world.pov.horizon_color[0],\n world.pov.horizon_color[1],\n world.pov.horizon_color[2],\n render.alpha_filter,\n ),\n )\n\n world_tex_count = 0\n # For Background image textures\n for t in world.pov_texture_slots: # risk to write several sky_spheres but maybe ok.\n if t:\n tex = bpy.data.textures[t.texture]\n if tex.type is not None:\n world_tex_count += 1\n # XXX No enable checkbox for world textures yet (report it?)\n # if t and tex.type == 'IMAGE' and t.use:\n if tex.type == \"IMAGE\":\n image_filename = path_image(tex.image)\n if tex.image.filepath != image_filename:\n tex.image.filepath = image_filename\n if image_filename != \"\" and t.use_map_blend:\n textures_blend = image_filename\n # colvalue = t.default_value\n t_blend = t\n\n # Commented below was an idea to make the Background image oriented as camera\n # taken here:\n # http://news.pov.org/pov.newusers/thread/%3Cweb.4a5cddf4e9c9822ba2f93e20@news.pov.org%3E/\n # Replace 4/3 by the ratio of each image found by some custom or existing\n # function\n # mapping_blend = (\" translate <%.4g,%.4g,%.4g> rotate z*degrees\" \\\n # \"(atan((camLocation - camLookAt).x/(camLocation - \" \\\n # \"camLookAt).y)) rotate x*degrees(atan((camLocation - \" \\\n # \"camLookAt).y/(camLocation - camLookAt).z)) rotate y*\" \\\n # \"degrees(atan((camLocation - camLookAt).z/(camLocation - \" \\\n # \"camLookAt).x)) scale <%.4g,%.4g,%.4g>b\" % \\\n # (t_blend.offset.x / 10 , t_blend.offset.y / 10 ,\n # t_blend.offset.z / 10, t_blend.scale.x ,\n # t_blend.scale.y , t_blend.scale.z))\n # using camera rotation valuesdirectly from blender seems much easier\n if 
t_blend.texture_coords == \"ANGMAP\":\n mapping_blend = \"\"\n else:\n # POV-Ray \"scale\" is not a number of repetitions factor, but its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # UV scale is 0.5,0.5 in blender and 0,0 in POV\n # Further Scale by 2 and translate by -1 are\n # required for the sky_sphere not to repeat\n\n mapping_blend = (\n \"scale 2 scale <%.4g,%.4g,%.4g> translate -1 \"\n \"translate <%.4g,%.4g,%.4g> rotate<0,0,0> \"\n % (\n (1.0 / t_blend.scale.x),\n (1.0 / t_blend.scale.y),\n (1.0 / t_blend.scale.z),\n 0.5 - (0.5 / t_blend.scale.x) - t_blend.offset.x,\n 0.5 - (0.5 / t_blend.scale.y) - t_blend.offset.y,\n t_blend.offset.z,\n )\n )\n\n # The initial position and rotation of the pov camera is probably creating\n # the rotation offset should look into it someday but at least background\n # won't rotate with the camera now.\n # Putting the map on a plane would not introduce the skysphere distortion and\n # allow for better image scale matching but also some waay to chose depth and\n # size of the plane relative to camera.\n tab_write(file, \"sky_sphere {\\n\")\n tab_write(file, \"pigment {\\n\")\n tab_write(\n file,\n 'image_map{%s \"%s\" %s}\\n'\n % (image_format(textures_blend), textures_blend, img_map_bg(t_blend)),\n )\n tab_write(file, \"}\\n\")\n tab_write(file, \"%s\\n\" % mapping_blend)\n # The following layered pigment opacifies to black over the texture for\n # transmit below 1 or otherwise adds to itself\n tab_write(file, \"pigment {rgb 0 transmit %s}\\n\" % tex.intensity)\n tab_write(file, \"}\\n\")\n # tab_write(file, \"scale 2\\n\")\n # tab_write(file, \"translate -1\\n\")\n\n # For only Background gradient\n\n if world_tex_count == 0 and world.pov.use_sky_blend:\n tab_write(file, \"sky_sphere {\\n\")\n tab_write(file, \"pigment {\\n\")\n # maybe Should follow the advice of POV doc about replacing gradient\n # for skysphere..5.5\n tab_write(file, \"gradient y\\n\")\n tab_write(file, \"color_map {\\n\")\n\n if render.alpha_mode == \"TRANSPARENT\":\n tab_write(\n file,\n \"[0.0 rgbft<%.3g, %.3g, %.3g, %.3g, 0>]\\n\"\n % (\n world.pov.horizon_color[0],\n world.pov.horizon_color[1],\n world.pov.horizon_color[2],\n render.alpha_filter,\n ),\n )\n tab_write(\n file,\n \"[1.0 rgbft<%.3g, %.3g, %.3g, %.3g, 0>]\\n\"\n % (\n world.pov.zenith_color[0],\n world.pov.zenith_color[1],\n world.pov.zenith_color[2],\n render.alpha_filter,\n ),\n )\n if agnosticrender.film_transparent or render.alpha_mode == \"STRAIGHT\":\n tab_write(file, \"[0.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\\n\" % (world.pov.horizon_color[:]))\n # aa premult not solved with transmit 1\n tab_write(file, \"[1.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\\n\" % (world.pov.zenith_color[:]))\n else:\n tab_write(file, \"[0.0 rgbt<%.3g, %.3g, %.3g, 0>]\\n\" % (world.pov.horizon_color[:]))\n tab_write(file, \"[1.0 rgbt<%.3g, %.3g, %.3g, 0>]\\n\" % (world.pov.zenith_color[:]))\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n # Sky_sphere alpha (transmit) is not translating into image alpha the same\n # way as 'background'\n\n # if world.pov.light_settings.use_indirect_light:\n # scene.pov.radio_enable=1\n\n # Maybe change the above to a function copyInternalRenderer settings when\n # user pushes a button, then:\n # scene.pov.radio_enable = world.pov.light_settings.use_indirect_light\n # and other such translations but maybe this would not be allowed either?\n\n # 
-----------------------------------------------------------------------------\n\n mist = world.mist_settings\n\n if mist.use_mist:\n tab_write(file, \"fog {\\n\")\n if mist.falloff == \"LINEAR\":\n tab_write(file, \"distance %.6f\\n\" % ((mist.start + mist.depth) * 0.368))\n elif mist.falloff in [\"QUADRATIC\", \"INVERSE_QUADRATIC\"]: # n**2 or squrt(n)?\n tab_write(file, \"distance %.6f\\n\" % ((mist.start + mist.depth) ** 2 * 0.368))\n tab_write(\n file,\n \"color rgbt<%.3g, %.3g, %.3g, %.3g>\\n\"\n % (*world.pov.horizon_color, (1.0 - mist.intensity)),\n )\n # tab_write(file, \"fog_offset %.6f\\n\" % mist.start) #create a pov property to prepend\n # tab_write(file, \"fog_alt %.6f\\n\" % mist.height) #XXX right?\n # tab_write(file, \"turbulence 0.2\\n\")\n # tab_write(file, \"turb_depth 0.3\\n\")\n tab_write(file, \"fog_type 1\\n\") # type2 for height\n tab_write(file, \"}\\n\")\n if scene.pov.media_enable:\n tab_write(file, \"media {\\n\")\n tab_write(\n file,\n \"scattering { %d, rgb %.12f*<%.4g, %.4g, %.4g>\\n\"\n % (\n int(scene.pov.media_scattering_type),\n scene.pov.media_diffusion_scale,\n *(scene.pov.media_diffusion_color[:]),\n ),\n )\n if scene.pov.media_scattering_type == \"5\":\n tab_write(file, \"eccentricity %.3g\\n\" % scene.pov.media_eccentricity)\n tab_write(file, \"}\\n\")\n tab_write(\n file,\n \"absorption %.12f*<%.4g, %.4g, %.4g>\\n\"\n % (scene.pov.media_absorption_scale, *(scene.pov.media_absorption_color[:])),\n )\n tab_write(file, \"\\n\")\n tab_write(file, \"samples %.d\\n\" % scene.pov.media_samples)\n tab_write(file, \"}\\n\")", "def test_visuThreeD1(self):\n\n visu_logic = slicer.modules.visuThreeDWidget.logic\n #visu_logic.set_user_table(self.user_table)\n #visu_logic.set_user_file('/work/maria5/EBDS_CIVILITY/DataShare/TestMatricesForVisualization/AAL78/PerNodeMetrics/Conte_EigenVectorCentrality_4Yr_AAL78Regions.csv')\n #visu_logic.set_user_file('/Users/Wieke/Documents/visuThreeD/neo-0042-4year_AvgSym_normFull.csv')\n # visu_logic.create_node_actors()\n # visu_logic.create_line_actors()\n # visu_logic.update()\n #visu_logic.set_node_range()", "def create_output():\n\n input_data = \"{}/{}.json\".format(TRANSCRIPTS_VIDEOS_PATH, request.form[\"name\"])\n duration = \"0,{}\".format(int(float(request.form[\"duration\"])))\n movie = \"{}/{}\".format(VIDEOS_PATH, request.form[\"movie\"]) # videos/movie.mp4\n movie_data = \"{}/{}.json\".format(TRANSCRIPTS_VIDEOS_PATH, request.form[\"movie\"]) # transcripts/movie.mp4.json\n\n scene = make_scene(OUTPUT_VIDEOS_PATH, input_data, duration, movie, movie_data, True)\n return jsonify(status=\"200\", scene=scene)", "def save_pose_to_shelf(*args):\n target_shelf = mel.eval('tabLayout -q -selectTab $gShelfTopLevel;')\n store_cmds = 'import pymel.core as pm \\n' \\\n 'import mimic_utils \\n' \\\n 'reload(mimic_utils) \\n\\n' \\\n # 'if not check_robot_selection(1): \\n' \\\n # ' robot = \\'\\' \\n\\n'\n\n start_line_code = \"[\"\n end_line_code = \"],\\n\"\n\n if not check_robot_selection(1):\n pm.warning(\"Must select exactly one robot\")\n return\n\n robot = get_robot_roots()[0]\n # Check which mode we're in\n current_tab = pm.tabLayout('switcher_tab_layout', query=True, st=True)\n\n # IK MODE\n if current_tab == 'ikTab':\n store_cmds += 'tab = 1 \\n'\n store_cmds += 'attrs = ['\n\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n target_ctrl_str = __TARGET_CTRL_PATH\n\n config_attrs = ['ik', 'v', 'ikSolution1', 'ikSolution2', 'ikSolution3']\n for each in config_attrs:\n find_val = 
pm.getAttr(target_ctrl_path + \".\" + each)\n save_to_shelf = (start_line_code\n + \"'\"\n + (target_ctrl_str + \".\" + each)\n + \"', \" + \" %f\" + end_line_code) % find_val\n\n store_cmds += save_to_shelf\n\n # If a tool controller exists, use that to keyframe transformation\n # attributes\n if pm.objExists(tool_ctrl_path):\n target_ctrl = tool_ctrl_path\n target_ctrl_str = __TOOL_CTRL_PATH\n else:\n target_ctrl = target_ctrl_path\n target_ctrl_str = __TARGET_CTRL_PATH\n\n keyable = pm.listAttr(target_ctrl,\n k=True,\n r=True,\n w=True,\n c=True,\n u=True)\n\n # Remove robotSubtype from list\n # In future rigs, this shouldn't be keyable\n if 'robotSubtype' in keyable:\n keyable.remove('robotSubtype')\n\n for each in keyable:\n find_val = pm.getAttr(target_ctrl + \".\" + each)\n save_to_shelf = (start_line_code + \"'\" + (\n target_ctrl_str + \".\" + each) + \"', \" + \" {}\".format(find_val) + end_line_code)\n store_cmds += save_to_shelf\n\n # FK MODE\n else:\n\n store_cmds += 'tab = 2 \\n'\n store_cmds += 'attrs = ['\n\n target_ctrl_path = get_target_ctrl_path(robot)\n target_ctrl_str = __TARGET_CTRL_PATH\n\n config_attrs = ['ik', 'v']\n for each in config_attrs:\n find_val = pm.getAttr(target_ctrl_path + \".\" + each)\n save_to_shelf = (start_line_code + \"'\" + (\n target_ctrl_str + \".\" + each) + \"', \" + \" %f\" + end_line_code) % find_val\n store_cmds += save_to_shelf\n\n joint_vals = [__A1_FK_CTRL_PATH,\n __A2_FK_CTRL_PATH,\n __A3_FK_CTRL_PATH,\n __A4_FK_CTRL_PATH,\n __A5_FK_CTRL_PATH,\n __A6_FK_CTRL_PATH]\n joint_val_attr = ['rotateY', 'rotateX', 'rotateX', 'rotateZ', 'rotateX', 'rotateZ']\n\n for i, each in enumerate(joint_vals):\n attrs = format_path(each + \".\" + joint_val_attr[i], robot)\n attr_str = each + \".\" + joint_val_attr[i]\n find_val = pm.getAttr(attrs)\n save_to_shelf = (start_line_code + \"'\" + attr_str + \"', \" + \" %f\" + end_line_code) % find_val\n store_cmds += save_to_shelf\n\n store_cmds += '] \\n\\n' \\\n 'mimic_utils.assign_saved_pose(attrs, tab) \\n'\n\n prompt_dialog = pm.promptDialog(t=\"Robot Pose\", m=\"Pose Name:\", b=\"Save\")\n\n # Condition statement that checks if our button gets clicked.\n # If this condition is met, then run the following commands\n if prompt_dialog == \"Save\":\n # This variable stores the Name we add to our Prompt Dialog\n prompt_dialog_name = pm.promptDialog(query=True, text=True)\n # This line creates our Shelf Button that uses MEL as the source type\n # for the commands stored in \"store_cmds\", and adds the Shelf Button\n # under our custom tab named \"Body Poses\"\n pm.shelfButton(l=prompt_dialog_name,\n annotation=prompt_dialog_name,\n imageOverlayLabel=prompt_dialog_name,\n i='commandButton.png',\n command=store_cmds,\n p=target_shelf,\n sourceType=\"python\")", "def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return", "def batch_export_ortho():\r\n global path_to_project\r\n \r\n for path in path_to_project:\r\n export_filename = os.path.basename(path['ProjectPath']).replace('.psz','.tif')\r\n export_path = os.path.join(export_folder,export_filename)\r\n try:\r\n project = PhotoScan.app.document\r\n project.open(path['ProjectPath'])\r\n \r\n dx, dy = mosaic.get_resolution(path['Flight_id'], path['Field'], path['Camera'])\r\n \r\n if dx is not None and dy is not None:\r\n status = 
project.activeChunk.exportOrthophoto(\r\n export_path, format=\"tif\", color_correction=False, blending='average', dx=dx, dy=dy,\r\n projection=project.activeChunk.projection)\r\n else:\r\n status = project.activeChunk.exportOrthophoto(export_path, format=\"tif\", color_correction=False, blending='average',projection=project.activeChunk.projection)\r\n except Exception as e:\r\n print(e)\r\n if status is True:\r\n print(\"Perfect\")\r\n app = PhotoScan.Application()\r\n app.quit()", "def scene_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n thumbnails_path = get_directory('icons')\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if AM.scene_name not in thumb_list or AM.scene_name in thumb_list and AM.replace_rename == 'replace':\r\n if AM.scene_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(AM.scene_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n box.prop(AM, \"scene_name\", text=\"\")\r\n \r\n row = box.row(align = True)\r\n row.label(\"Scene name:\")\r\n row.prop(AM, \"scene_name\", text = \"\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n\r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, 
\"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n \r\n if AM.scene_name and ((AM.scene_name not in thumb_list or AM.replace_rename == 'replace') and AM.render_type == 'opengl' or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_scene_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(AM.scene_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n box.prop(AM, \"scene_name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def viewAll(self):\n self._sceneviewer.viewAll()", "def __init__(self):\n Page.__init__(self, u\"Esfera, parametrización por proyecciones estereográficas\")\n\n r = .998\n esf = ParametricPlot3D(lambda t, f: (r * sin(t) * cos(f), r * sin(t) * sin(f), r * cos(t)), (0, pi, 70), (0, 2 * pi, 70))\n# esf.setAmbientColor(_1(99,136,63))\n esf.setDiffuseColor(_1(99, 136, 63))\n esf.setSpecularColor(_1(99, 136, 63))\n\n\n def proyZm1(u, v, t1):\n \"\"\"proy desde el polo norte al plano z=-1\"\"\"\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)\n\n def proyZ1(u, v, t2):\n \"\"\"proy desde el polo sur al plano z=1\"\"\"\n den = u ** 2 + v ** 2 + 4\n x = u - t2 * (u - 4 * u / den)\n y = v - t2 * (v - 4 * v / den)\n z = 1 - t2 * (2 - 8 / den)\n return (x, y, z)\n\n stereo = ParametricPlot3D(proyZm1, (-3, 3, 70), (-3, 3, 70))\n stereo.setLinesVisible(True)\n stereo.setMeshVisible(False)\n stereo.setMeshDiffuseColor(_1(117, 55, 79))\n\n stereo2 = ParametricPlot3D(proyZ1, (-3, 3, 70), (-3, 3, 70))\n stereo2.setLinesVisible(True)\n stereo2.setMeshVisible(False)\n stereo2.setMeshDiffuseColor(_1(80, 87, 193))\n stereo2.setTransparency(0.5)\n stereo2.setTransparencyType(8)\n\n\n baseplane = BasePlane()\n baseplane.setHeight(-1.005)\n baseplane.setRange((-4, 4, 7))\n self.addChild(esf)\n self.addChild(stereo2)\n self.addChild(stereo)\n self.addChild(baseplane)\n\n params = [stereo,stereo2]\n\n ## no queremos los controles\n for i,p in enumerate(params):\n p.parameters['t%d' % (i+1)].hide()\n\n anims = [p.parameters['t%d' % (i+1)].asAnimation() for i,p in enumerate(params)]\n self.setupAnimations(anims)", "def export_xml(self, filename, full_export = False):\n \n # Private functions to write blocks of text\n # --------------------------\n def print_openrave(f, model):\n # print_openrave - OpenRAVE data\n # For compatibility only...\n f.write( ' <Openrave>\\n')\n f.write( ' <name>{0}</name>\\n'.format(model.name))\n f.write( ' <xml>{0}</xml>\\n'.format(model.or_xml))\n f.write( ' <transf>')\n for n in model.or_transf.flat[:]:\n f.write('{0:6f}'.format(n))\n f.write( '</transf>\\n')\n f.write( ' </Openrave>\\n')\n\n # --------------------------\n def print_Points(f, model, full_export):\n # print_Points - Print all Point3D entries\n\n f.write( ' <Points>\\n')\n for i in range(model.pts3D.shape[1]):\n print_Point(f, model, i)\n \n if full_export:\n for j in range(model.pt_info[i].desc.shape[0]):\n print_observ(f, model.pt_info[i], j, \\\n self.desc_name[self.desc_type[i]])\n f.write( '</Point>\\n');\n \n f.write( ' 
</Points>\\n');\n\n # --------------------------\n def print_observ(f, pt, idx_pt, desc_name):\n # <Observation camera_id=\"n\" desc_type=\"SIFT\" loc=\"x;y;scale;orientation\"\n # desc=\"a;b;c;...\">\n f.write( ' <Observation ');\n f.write( 'camera_id=\"{0}\" '.format(pt.cam_id[idx_pt]))\n f.write( 'desc_type=\"{0}\" '.format(desc_name))\n f.write( 'loc=\"')\n for l in pt.locs[idx_pt, :].ravel():\n f.write('{0:6f} '.format(l))\n f.write( '\" ')\n f.write( 'desc=\"')\n for d in pt.desc[idx_pt, :].ravel():\n f.write( '{0:6f} '.format(d))\n f.write( '\"/>\\n')\n\n # --------------------------\n def print_Point(f, model, idx_pt):\n # <Point p3d=\"x;y;z\" nviews=\"\" avg_err=\"\" color=\"R;G;B\" desc_type=\"SIFT\"\n # desc=\"a;b;c;...\">\n f.write( ' <Point ');\n f.write( 'p3d=\"{0:6f} {1:6f} {2:6f}\" '.format(model.pts3D[0, idx_pt], \\\n model.pts3D[1, idx_pt], \\\n model.pts3D[2, idx_pt]))\n f.write( 'nviews=\"{0:d}\" '.format(model.num_views[idx_pt]))\n f.write( 'avg_err=\"{0:6f}\" '.format(model.avg_err[idx_pt]))\n f.write( 'color=\"{0} {1} {2}\" '.format(model.color3D[0,idx_pt], \\\n model.color3D[1,idx_pt], \\\n model.color3D[2,idx_pt]))\n f.write( 'desc_type=\"{0}\" '\\\n .format(model.desc_name[ model.desc_type[idx_pt] ]))\n f.write( 'desc=\"')\n for d in model.desc[idx_pt].ravel():\n f.write( '{0:6f} '.format(d))\n f.write( '\">\\n')\n\n # --------------------------\n def print_Cameras(f, model):\n # print_Cameras - Print all Camera entries\n\n f.write( ' <Cameras>\\n')\n for idx, cam in enumerate(model.cam_poses.T):\n print_Camera(f, cam, idx)\n f.write( ' </Cameras>\\n')\n\n # --------------------------\n def print_Camera(f, cpose, idx_cam):\n # print_Camera - Camera entry\n\n f.write( ' <Camera ')\n f.write( 'id=\"{0}\" '.format(idx_cam))\n f.write( 'rot_type=\"quat\" ')\n q_t = tf_format.tf_format('quat', cpose)\n f.write( 'rot=\"')\n for val in q_t[:4].ravel():\n f.write( '{0:6f} '.format(val))\n f.write( '\" ')\n f.write( 'tx=\"')\n for val in q_t[4:].ravel():\n f.write( '{0:6f} '.format(val))\n f.write( '\"/>\\n')\n\n # --------------------------\n # Print data to file\n\n # First, update structures\n self.getNumViews()\n self.getNumPointsInCam()\n self.getAverageErr()\n\n with open(filename, 'w') as f:\n f.write('<Model name=\"{0}\" version=\"{1}\">\\n'.format(self.name, \\\n self.version) )\n # print_openrave(f, model)\n print_Points(f, self, full_export)\n if full_export:\n print_Cameras(f, self)\n f.write('</Model>\\n')", "def export_as(self, version):\n raise NotImplementedError(\"export_as is not implemented\")", "def _setup_scene(self):\n\n scene = bpy.context.scene\n\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n # remove non mesh objects\n for obj in scene.objects:\n obj.select = (obj.type != \"MESH\")\n bpy.ops.object.delete()\n\n # empty sequences are false by default\n if scene.objects:\n\n # unlink objects (all meshes) from parents\n bpy.ops.object.select_all()\n bpy.ops.object.parent_clear(type=\"CLEAR_KEEP_TRANSFORM\")\n\n # join all meshes in one single object\n scene.objects.active = bpy.data.objects[0]\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)\n bpy.context.object.name = \"Object\"\n bpy.context.object.dimensions = bpy.context.object.dimensions / max(bpy.context.object.dimensions)\n\n # set the origin of the object to the cursor location\n scene.cursor_location = [0, 0, 0]\n bpy.ops.object.origin_set(type=\"ORIGIN_CURSOR\")\n # bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\", 
center=\"BOUNDS\")\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n\n if self.add_ground_plane:\n bpy.ops.mesh.primitive_plane_add(radius=10.)\n\n bpy.ops.object.select_all(action=\"DESELECT\")", "def object_export(request, simulation, object_name):\n query = get_query(object_name, simulation)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n seed = np.random.randint(10000)\n filename = '{0}/website_files/exports/{1}.tsv'.format(settings.BASE_DIR,\n seed)\n with codecs.open(filename, 'w', encoding='utf8') as f:\n if object_name == 'centroid':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'crossing':\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'link':\n fields = ['id', 'name', 'origin', 'destination', 'lanes', 'length',\n 'speed', 'capacity', 'vdf']\n elif object_name == 'function':\n fields = ['id', 'expression']\n writer = csv.writer(f, delimiter='\\t')\n if object_name in ('centroid', 'crossing'):\n writer.writerow(['id', 'name', 'x', 'y', 'db_id'])\n values = query.values_list('user_id', 'name', 'x', 'y', 'id')\n elif object_name == 'function':\n writer.writerow(['id', 'name', 'expression'])\n values = query.values_list('user_id', 'name', 'expression')\n elif object_name == 'link':\n writer.writerow(['id', 'name', 'lanes', 'length', 'speed',\n 'capacity', 'function', 'origin', 'destination'])\n values = query.values_list('user_id', 'name', 'lanes', 'length',\n 'speed', 'capacity', 'vdf__user_id')\n # Origin and destination id must be converted to user_id.\n centroids = get_query('centroid', simulation)\n crossings = get_query('crossing', simulation)\n ids = list(centroids.values_list('id', 'user_id'))\n ids += list(crossings.values_list('id', 'user_id'))\n # Map id of nodes to their user_id.\n id_mapping = dict(ids)\n origins = query.values_list('origin', flat=True)\n origins = np.array([id_mapping[n] for n in origins])\n destinations = query.values_list('destination', flat=True)\n destinations = np.array([id_mapping[n] for n in destinations])\n # Add origin and destination user ids to the values array.\n origins = np.transpose([origins])\n destinations = np.transpose([destinations])\n values = np.hstack([values, origins, destinations])\n writer.writerows(values)\n with codecs.open(filename, 'r', encoding='utf8') as f:\n # Build a response to send a file.\n response = HttpResponse(f.read())\n response['content_type'] = 'text/tab-separated-values'\n response['Content-Disposition'] = \\\n 'attachement; filename={}.tsv'.format(metro_to_user(object_name))\n # We delete the export file to save disk space.\n os.remove(filename)\n return response", "def export_robot_to_xacro_files():\n global robot, OUTPUT, NAME\n doc = Document()\n root = doc.createElement('robot')\n doc.appendChild(root)\n root.setAttribute(\"xmlns:xacro\", \"http://www.ros.org/wiki/xacro\")\n root.setAttribute(\"name\", robot.name)\n root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +\n '_visual_collisions.xacro'))\n create_visual_xacro()\n for i in XACRO_DICO.keys():\n print('exporting ' + NAME + '_' + i + '.xacro')\n if i.find('eye') != -1:\n export_kinematic_chain_to_xacro(i, 'HeadRoll_link',\n 'HeadRoll_link')\n else:\n export_kinematic_chain_to_xacro(i)\n filenamerobot = NAME + '_' + i + '.xacro'\n root.appendChild(ur.short(doc, 'xacro:include', 'filename',\n filenamerobot))\n # Transmission elements not available from Aldebaran libraries yet\n 
export_robot_element('Transmission')\n root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +\n '_Transmission.xacro'))\n # Gazebo Plugin not available from Aldebaran libraries yet\n export_robot_element('Gazebo')\n root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +\n '_Gazebo.xacro'))\n root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +\n '_sensors.xacro'))\n export_list_to_xacro(['_frame'], OUTPUT[0:OUTPUT.rfind('.')] +\n '_sensors.xacro')\n root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +\n '_fingers.xacro'))\n export_list_to_xacro(['Finger', 'Thumb'], OUTPUT[0:OUTPUT.rfind('.')] +\n '_fingers.xacro')\n if NAME == 'pepper':\n root.appendChild(ur.short(doc, 'xacro:include', 'filename', NAME +\n '_wheels.xacro'))\n export_list_to_xacro(['Wheel'], OUTPUT[0:OUTPUT.rfind('.')] +\n '_wheels.xacro')\n if NAME == 'romeo':\n root.appendChild(ur.short(doc, 'xacro:include', 'filename',\n 'romeo_cap.xacro'))\n\n filename = OUTPUT[0:OUTPUT.rfind('.')] + '_robot.xacro'\n write_comments_in_xacro(doc, filename)\n print('output directory : ' + OUTPUT[0:OUTPUT.rfind('/') + 1])", "def drawIsoSurfaces0( self ):\n #research\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n v= vtk.vtkAppendPolyData()\n \n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"nth\")!=None and modelNode.GetDisplayVisibility()==1 :\n v.AddInput(modelNode.GetPolyData())\n \n modeller = vtk.vtkImplicitModeller()\n modeller.SetInput(v.GetOutput())\n modeller.SetSampleDimensions(self.dim.value,self.dim.value,self.dim.value)\n modeller.SetCapping(0)\n modeller.SetAdjustBounds(self.abonds.value)\n modeller.SetProcessModeToPerVoxel() \n modeller.SetAdjustDistance(self.adist.value/100)\n modeller.SetMaximumDistance(self.maxdist.value/100) \n \n contourFilter = vtk.vtkContourFilter()\n contourFilter.SetNumberOfContours(self.nb.value)\n contourFilter.SetInputConnection(modeller.GetOutputPort()) \n contourFilter.ComputeNormalsOn()\n contourFilter.ComputeScalarsOn()\n contourFilter.UseScalarTreeOn()\n contourFilter.SetValue(self.contour.value,self.contourValue.value)\n contourFilter.SetValue(self.contour2.value,self.contourValue2.value)\n contourFilter.SetValue(self.contour3.value,self.contourValue3.value)\n contourFilter.SetValue(self.contour4.value,self.contourValue4.value)\n contourFilter.SetValue(self.contour5.value,self.contourValue5.value)\n\n isoSurface = contourFilter.GetOutput()\n self.AddContour(isoSurface)", "def list_scene(command):\n namespace = app.main(command)\n assert namespace.command == 'ls' or namespace.command == \"listscenes\"", "def run_scene(self, id):\n act = SceneAction(self, id)\n return act.invoke()", "def poseReaderRig(objs,space=1,name=None, nameOverride=0):\n if not pm.pluginInfo(\"poseReader\",q=1,loaded=1):\n pm.loadPlugin(\"poseReader.so\")\n if len(objs)<=0:\n pm.error((\"poseReaderUI: You must select one or more objects to create a poseReader node for!\"),sl=0)\n poses=[]\n # Store created nodes for sel at end\n obj=''\n for obj in objs:\n Obj=pm.util.capitalize(obj)\n # new to maya 6, tho it is a script....\n if name == None:\n pose=pm.createNode(\"poseReader\",n=(\"poseReader_\" + Obj + \"Shape#\"))\n else:\n if nameOverride==0:\n pose=pm.createNode(\"poseReader\",n=(\"poseReader_\" + Obj+name+'Shape'))\n elif nameOverride==1:\n if name[-5:]=='Shape':\n pose=pm.createNode(\"poseReader\",n=name)\n else:\n pose=pm.createNode(\"poseReader\",n=name+'Shape')\n attr=\"worldMatrix\"\n if space == 2:\n 
attr=\"matrix\"\n\n pm.connectAttr((obj + \".\" + attr),(pose + \".worldMatrixLiveIn\"),f=1)\n xform=pm.listRelatives(pose,p=1)[0]\n pm.connectAttr((xform + \".\" + attr),(pose + \".worldMatrixPoseIn\"),f=1)\n poses.append(xform)\n # Actually store xform for sel.\n # Make a keyable attr people can actually see and use.\n pm.addAttr(pose,ln=\"weight\",k=1)\n pm.connectAttr((pose + \".outWeight\"),(pose + \".weight\"),f=1)\n # Parent to same parent that object has.\n # Very important if using local space.\n parent=pm.listRelatives(obj,p=1)[0]\n if parent != \"\":\n pm.parent(xform,parent)\n # match rotate order of obj\n rotOrder = pm.getAttr(obj+'.rotateOrder')\n xform.attr('rotateOrder').set(rotOrder)\n # Snap xform to same as obj\n pCons=pm.pointConstraint(obj,xform,w=1)\n oCons=pm.orientConstraint(obj,xform,w=1)\n pm.delete(pCons,oCons)\n # Also make up animCurve for animCurve mode\n animCurve=pm.createNode('animCurveUU')\n pm.setKeyframe(animCurve,itt=\"flat\",v=1.0,ott=\"flat\",f=0.0)\n pm.setKeyframe(animCurve,itt=\"spline\",v=0.85,ott=\"spline\",f=0.25)\n pm.setKeyframe(animCurve,itt=\"spline\",v=0.15,ott=\"spline\",f=0.75)\n pm.setKeyframe(animCurve,itt=\"flat\",v=0.0,ott=\"flat\",f=1.0)\n pm.connectAttr((animCurve + \".message\"),(pose + \".msgAnimCurve\"),f=1)\n pm.connectAttr((animCurve + \".output\"),(pose + \".animCurveOutput\"),f=1)\n\n pm.select(poses,r=1)\n # Now if we have more than one pose...connect them up to a multiTrigger node\n nPoses=len(poses)\n if nPoses>1:\n trig=pm.createNode(\"multiTrigger\")\n # Make a keyable attr people can actually see and use.\n pm.addAttr(trig,ln=\"weight\",k=1)\n pm.connectAttr((trig + \".outWeight\"),(trig + \".weight\"),f=1)\n i=0\n for i in range(0,nPoses):\n pm.connectAttr((poses[i] + \".weight\"),(trig + \".inputValues[\" + str(i) + \"]\"),f=1)\n pm.select(poses,trig,r=1)\n return pose", "def vsav():\n if not sys.argv[1:]:\n sys.stderr.write(vsav.__doc__.strip() + '\\n')\n else:\n for src in sys.argv[1:]:\n version_util.save(src)", "def filemenu_Export(self):\n line_dict = {}\n for line in self.lines.values():\n for name, arr in line.to_mat().items():\n line_dict[name] = arr\n fileTypes = [(\"MATLAB file\",\"*.mat\"), (\"NumPy file\",\"*.npz\")]\n options = {}\n options['initialdir'] = os.path.expanduser('~')\n options['filetypes'] = fileTypes\n options['parent'] = self.master\n filename = filedialog.asksaveasfilename(**options)\n if filename:\n _, ext = os.path.splitext(filename)\n if ext == \".mat\":\n sio.savemat(filename, line_dict)\n elif ext == \".npz\":\n np.savez(filename, lines=line_dict)" ]
[ "0.6807009", "0.61583364", "0.60997474", "0.6089782", "0.5898155", "0.5882427", "0.5869277", "0.58672017", "0.5798506", "0.56990576", "0.5693678", "0.56637156", "0.5612492", "0.5589226", "0.553898", "0.5532267", "0.55292517", "0.55200547", "0.5514314", "0.5509473", "0.54984117", "0.5487525", "0.5467995", "0.54168653", "0.5416319", "0.5412217", "0.5410536", "0.54101694", "0.54058725", "0.5381438", "0.5378143", "0.53662544", "0.53603446", "0.5344376", "0.5342433", "0.5328704", "0.53218436", "0.5315008", "0.53007066", "0.52854156", "0.5283664", "0.52764887", "0.52536726", "0.5246622", "0.5244708", "0.5235787", "0.5217021", "0.52156717", "0.5215434", "0.5206144", "0.5171957", "0.51665014", "0.51659566", "0.5165507", "0.51651007", "0.5155593", "0.5144929", "0.514164", "0.5133877", "0.5133542", "0.51312566", "0.5120495", "0.5114821", "0.5111678", "0.5104244", "0.51010406", "0.50847256", "0.50779635", "0.5063367", "0.5063132", "0.50537", "0.5052242", "0.50492483", "0.50469595", "0.50464356", "0.5039131", "0.5035695", "0.50241923", "0.50217247", "0.5021704", "0.5008859", "0.50086164", "0.49861366", "0.49854934", "0.49842533", "0.49798673", "0.49737114", "0.49694055", "0.49692637", "0.49631023", "0.49619392", "0.4960656", "0.49603385", "0.49489096", "0.49423662", "0.4941509", "0.49358377", "0.49324483", "0.49305534", "0.49242848" ]
0.6433381
1
Import all data into the scene.
def importAll(self, imdata = True, imlights = True, imaovs = True, imshaders = True, immaster = True, asset = '', searchAndReplace = ['',''] ):
    if immaster:
        self.importMasterSettings()
    if imlights and self.lightPath.exists:
        self.importLights( asset, searchAndReplace )
    if imaovs and self.aovsPath.exists:
        self.importAovs()
    if imshaders and self.shaderPath.exists:
        self.importShaders()
    if imdata and self.dataPath.exists:
        self.importData( asset, searchAndReplace )
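A minimal usage sketch (an editorial illustration, not part of the dataset record): the importer instance name and every argument value below are assumptions; only the importAll signature and its guard logic come from the code above.

# Hypothetical caller -- assumes an instance of the (unshown) importer class
# exposing lightPath / aovsPath / shaderPath / dataPath attributes.
importer.importAll(
    imdata = True,                            # importData() runs only if dataPath exists
    imlights = True,                          # importLights() runs only if lightPath exists
    imaovs = False,                           # skip importAovs() on this pass
    imshaders = True,                         # importShaders() runs only if shaderPath exists
    immaster = True,                          # importMasterSettings() runs whenever True
    asset = 'charA',                          # illustrative value, forwarded to importLights()/importData()
    searchAndReplace = ['old_ns', 'new_ns'],  # illustrative value, forwarded to importLights()/importData()
)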
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self):", "def import_scene(file_path):\n\n pass", "def load_data(self) -> None:", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def load(self):", "def import_all(self) -> None:\n with open(normpath('levels/level/lvl1.txt'), 'r') as f:\n while f:\n string = f.readline()\n if string == '':\n break\n string = string.strip().split(' ')\n if len(string) == 4:\n self.objects.append(pygame.Rect(int(string[0]), int(\n string[1]), int(string[2]), int(string[3])))\n for i in range(len(self.objects)):\n self.color.append(colors[random.randint(0, len(colors)-1)])", "def on_import(self, event=None):\n if event is not None:\n event.Skip()\n data_id, theory_id, state_id = self.set_data_helper()\n temp = data_id + state_id\n self.parent.set_data(data_id=temp, theory_id=theory_id)", "def load_data(self):\n @Logger.runtime\n def process_coords():\n \"\"\"\n The placement of locations on our minimap is crucial. Panda3D objects however have a coordinate range from\n -1 to 1 on all axis, meaning that if we read a coordinate of a location from some image processing software\n by hand, we have to transform those coordinates into coordinates Panda would understand. This function does\n just that.\n :return: Normalized coordinates of location coordinates.\n \"\"\"\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed\n\n @Logger.runtime\n def process_texture():\n texture_path = Path(\"resource/textures/{}\".format(row[\"texture\"]))\n texture = self.loader.loadTexture(texture_path)\n return texture\n\n # the cylinder is loaded here but it does not yet show up, until it's specifically asked to\n self.scene_3d_model = self.loader.loadModel(self.PATHS[\"3D_SCENE_MODEL\"])\n\n try:\n with open(self.PATHS[\"LOCATIONS_DB\"], \"r\") as l_file:\n data = csv.DictReader(l_file, delimiter=\"|\")\n for row in data:\n id = int(row[\"id\"])\n x, y = process_coords()\n neighbors = [int(neighbor_id) for neighbor_id in row[\"neighbors\"].split(',')]\n texture = process_texture()\n location = Location(id, x, y, neighbors, texture)\n location.reparentTo(self.render2d)\n self.locations.append(location)\n Logger.log_info('The locations_db has been loaded')\n except:\n Logger.error('{} file not found!'.format(self.PATHS[\"LOCATIONS_DB\"]))\n\n self.active_location = self.locations[0]", "def importData( self, asset = '', searchAndReplace = ['',''] ):\n\t\tpickleData = pickle.load( open( self.dataPath.path, \"rb\" ) )\n\t\tlayers = [RenderLayerData(l,d) for l,d in pickleData.items() if not ':' in l]\n\t\tfor l in layers:\n\t\t\tif not searchAndReplace [0]== '' or not searchAndReplace[1] == '':\n\t\t\t\tl.filterMe( asset, searchAndReplace )\n\t\t\tl.create()\n\t\t\tl.addObjects()\n\t\t\tl.makeOverrides()\n\t\t\tl.makeOverrideConnections()\n\t\t\tl.makeShaderOverride()", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def importBaseScene(self):\n logger.debug(\"Func: importBaseScene\")\n relSceneFile = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"RelativePath\"]\n absSceneFile = os.path.join(self.projectDir, relSceneFile)\n if os.path.isfile(absSceneFile):\n # 
cmds.file(absSceneFile, i=True)\n nuke.nodePaste(absSceneFile)\n return 0\n else:\n msg = \"File in Scene Manager database doesnt exist\"\n self._exception(210, msg)\n return -1, msg", "def process_scene_data(self, scene, data, tmp_dir):\n pass", "def _CMD_IMPORT(self, file_name):\n # reset inspector:\n # self.inspector = DataInspectorRecord()\n\n ext = file_name.split('.')[-1]\n if ext == 'mat':\n # self.model.from_json_dict(buff)\n self.model.from_mat_file(file_name)\n\n elif ext == 'json':\n buff = ''\n with open(file_name, 'rb') as f:\n buff = f.read()\n model = json.loads(buff)\n self.model.from_json_dict(model)\n\n else:\n raise DataExplorerError('Unsupported file format: {}'.format(ext))\n\n # update initial selection - first row:\n if len(self.model.data_list) > 0:\n self.handle_row_select([self.model.data_list[0]])", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def import_region(self, region_id, action=\"import\"):\n self.init_structures()\n con = SimConnection()\n con.connect(self.gridinfo._url)\n scenedata = con._con.ogrescene_list({\"RegionID\":region_id})\n for groupid, scenegroup in scenedata['res'].items():\n getattr(self, action+\"_group\")(groupid, scenegroup, 10)\n self.queueRedraw('VIEW3D')", "def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)", "def import_game_graph(self):\n self._import_win()\n self._import_loose()", "def doImport(self,textFile):\n self.loadText(textFile)\n self.getBooks()\n #self.copyBooks()\n self.genLibData()\n self.genLibCells()\n self.sortRecords()", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def loadData(self, actions):\n # begin to clear the scene\n self.scene.clear()\n self.scene.drawGrid()\n \n # and draw all items\n maxItemId = self.itemId\n for graphicalItem in actions:\n\n # extract item info\n itemType = int(graphicalItem['item-type'])\n itemId = graphicalItem['item-id']\n if sys.version_info > (3,): # py3 support\n graphicalItem['item-text'] = graphicalItem['item-text']\n else:\n graphicalItem['item-text'] = graphicalItem['item-text'].decode('utf8')\n itemText = graphicalItem['item-text']\n posX = float(graphicalItem['pos-x'])\n posY = float(graphicalItem['pos-y'])\n itemData = graphicalItem['item-data']\n\n\n # define the color of the item\n color = self.getItemColor(itemType=itemType)\n \n # add item in first\n self.addItem( itemType=itemType, itemId=itemId, itemText=itemText, \n itemColor=QBrush(color), itemPos=QPointF(posX,posY), itemData=itemData )\n \n # kept the max id\n if int(itemId) > maxItemId:\n maxItemId = int(itemId)\n \n self.itemId = maxItemId\n\n # endly draw all arrows\n for curItem in self.scene.items():\n for saveItem in actions:\n if not isinstance(curItem, DiagramItem):\n continue\n if curItem.itemId == int(saveItem['item-id']):\n if 'item-links' in saveItem:\n if isinstance(saveItem['item-links'], dict):\n saveItem['item-links'] = [saveItem['item-links']]\n for lnk in saveItem['item-links']:\n itemId = lnk['next-item-id']\n toHotspotId = lnk['to-hotspot-id']\n fromHotspotId = lnk['from-hotspot-id']\n \n endItem = 
self.findItem(id=itemId)\n if endItem is not None:\n self.trace( \"Arrow: %s -> %s\" % (fromHotspotId,toHotspotId) )\n arrow = Arrow(curItem, endItem, toHotspotId=toHotspotId, fromHotspotId=fromHotspotId)\n arrow.setColor(self.scene.myLineColor)\n curItem.addArrow(arrow)\n endItem.addArrow(arrow)\n arrow.setZValue(-1000.0)\n self.scene.addItem(arrow)\n arrow.updatePosition()", "def load_nodes(self):\n # Start with root nodes in the scene\n for node_id in self.gltf.scenes[0].nodes:\n node = self.load_node(self.gltf.nodes[node_id])\n self.scene.root_nodes.append(node)", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def load(self):\n #print self.fileInfo.name\n progress = self.progress\n filePath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n self.fileSize = os.path.getsize(filePath)\n #--Localize\n cells = self.cells\n records = self.records\n canSave = self.canSave\n skipObjRecords = self.skipObjRecords\n contTypes = set(['CREC','CNTC','NPCC'])\n levTypes = set(('LEVC','LEVI'))\n debrisIds = self.debrisIds\n debrisTypes = set(debrisIds.keys())\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n if not canSave: del self.tes3.others[:]\n #--Progress info\n progress = self.progress\n progress(0.0,'Loading '+self.fileInfo.name)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #print \"%s [%d]\" % (name,size)\n #--CELL?\n if name == 'CELL':\n record = Cell(name,size,delFlag,recFlag,ins,0,skipObjRecords)\n cells.append(record)\n if canSave: records.append(record)\n #--Contents\n elif canSave and name in contTypes:\n if name == 'CREC':\n record = Crec(name,size,delFlag,recFlag,ins,True)\n elif name == 'CNTC':\n record = Cntc(name,size,delFlag,recFlag,ins,True)\n else:\n record = Npcc(name,size,delFlag,recFlag,ins,True)\n self.conts.append(record)\n self.conts_id[record.getId()] = record\n records.append(record)\n #--File Map\n elif name == 'FMAP':\n record = Fmap(name,size,delFlag,recFlag,ins)\n self.fmap = record\n records.append(record)\n #--Landscapes\n elif name == 'LAND':\n record = Land(name,size,delFlag,recFlag,ins)\n self.lands[record.getId()] = record\n records.append(record)\n #--Scripts\n elif canSave and name == 'SCPT':\n record = Scpt(name,size,delFlag,recFlag,ins,True)\n records.append(record)\n if record.getRef():\n self.refs_scpt[record] = record.getRef()\n #--Save debris info?\n elif name in debrisTypes:\n record = Record(name,size,delFlag,recFlag,ins)\n id = record.getId()\n if id:\n debrisIds[name].append(id.lower())\n if canSave:\n records.append(record)\n #--Skip Non-cell?\n elif not canSave:\n ins.seek(size,1,name)\n #--Keep non-cell?\n else:\n records.append(Record(name,size,delFlag,recFlag,ins))\n #--Done Reading\n ins.close()\n #--Analyze Cells\n cntCells = 0\n progress.setMax(len(self.cells))\n for cell in self.cells:\n cell.load(None,1)\n self.cells_id[cell.getId()] = cell\n if not canSave:\n cell.data = None #--Free some memory\n #--Progress\n cntCells += 1\n progress(cntCells)\n #--Scripts\n if self.refs_scpt:\n self.updateScptRefs()", "def load(self, index):\n selected = self.games[index]\n try:\n with open(path.join(self.saved_games, selected)) as f:\n self.game_data['game_data'] = 
json.load(f)\n self.game_data['file_name'] = selected\n self.game_data['loaded'] = True\n self.game_data['next'] = False\n super().set_state(TRANSITION_OUT)\n logger.info('Load : %s', selected)\n except EnvironmentError as e:\n logger.exception(e)\n\n try:\n self.load_minimap()\n except EnvironmentError as e:\n logger.exception(e)", "def on_enter(self):\n\n super(BaseScene, self).on_enter()\n\n self.load_map()\n self.load_players()\n self.load_enemies()\n self.load_status_bar()\n\n self.enemies_layer.next_wave()", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def loadProducts():\n dump = os.path.dirname(os.path.abspath(__file__)) + \"/dump.json\"\n data = open(dump, 'r')\n for deserialized_object in serializers.deserialize(\"json\", data):\n deserialized_object.save()", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def load(self):\n\n if self.loaded:\n return\n\n self.region_back = None\n self.objects = []\n self.plants = []\n self.tiles = []\n\n # Some convenience vars\n materials = self.data.materials\n matmods = self.data.matmods\n objects = self.data.objects\n plants = self.data.plants\n world = self.world\n self.loaded = True\n\n # Get tiles\n try:\n data_tiles = world.get_tiles(self.rx, self.ry)\n except KeyError:\n print('WARNING: Region ({}, {}) was not found in world'.format(self.rx, self.ry))\n return\n\n # \"real\" coordinates\n base_x = self.rx*32\n gui_x = base_x*8\n base_y = self.ry*32\n gui_y = (world.height*8)-(base_y*8)\n\n # Background for our drawn area (black)\n self.region_back = self.scene.addRect(gui_x, gui_y-255, 255, 255,\n QtGui.QPen(QtGui.QColor(0, 0, 0)),\n QtGui.QBrush(QtGui.QColor(0, 0, 0)),\n )\n self.region_back.setZValue(Constants.z_black)\n\n # Tiles!\n cur_row = 0\n cur_col = 0\n for data_tile in data_tiles:\n self.tiles.append(GUITile(self.scene, data_tile,\n base_x+cur_col, base_y+cur_row,\n self,\n gui_x+cur_col*8, gui_y-(cur_row+1)*8,\n self.layer_toggles))\n self.scene.addItem(self.tiles[-1])\n cur_col += 1\n if cur_col == 32:\n cur_col = 0\n cur_row += 1\n\n # Entities!\n entities = []\n try:\n entities = world.get_entities(self.rx, self.ry)\n except KeyError:\n pass\n\n for e in entities:\n if e.name == 'ObjectEntity':\n obj_name = e.data['name']\n obj_orientation = e.data['orientationIndex']\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n if obj_name in objects:\n obj = objects[obj_name]\n (image, offset_x, offset_y) = obj.get_image(obj_orientation)\n qpmi = QtWidgets.QGraphicsPixmapItem(image)\n qpmi.setPos(\n (obj_x*8) + offset_x,\n (world.height*8)-(obj_y*8) - offset_y - image.height(),\n )\n qpmi.setZValue(Constants.z_objects)\n if not self.layer_toggles.objects_toggle.isChecked():\n qpmi.setVisible(False)\n self.scene.addItem(qpmi)\n self.objects.append(qpmi)\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_object(obj, obj_name, obj_orientation, qpmi, e.data)\n elif e.name == 'PlantEntity':\n desc = e.data['descriptions']['description']\n images = []\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n for piece in e.data['pieces']:\n piece_img = piece['image'].split('?')[0]\n if piece_img in plants:\n img = plants[piece_img].image\n qpmi = 
QtWidgets.QGraphicsPixmapItem(img)\n qpmi.setPos(\n (obj_x*8) + (piece['offset'][0]*8),\n (world.height*8)-(obj_y*8) - (piece['offset'][1]*8) - img.height(),\n )\n qpmi.setZValue(Constants.z_plants)\n if not self.layer_toggles.plants_toggle.isChecked():\n qpmi.setVisible(False)\n images.append((plants[piece_img], qpmi))\n self.scene.addItem(qpmi)\n self.plants.append(qpmi)\n else:\n print('not found: {}'.format(piece_img))\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_plant(desc, images)\n elif (e.name == 'MonsterEntity'\n or e.name == 'NpcEntity'\n or e.name == 'StagehandEntity'\n or e.name == 'ItemDropEntity'\n or e.name == 'VehicleEntity'\n ):\n # TODO: Ignoring for now\n pass\n else:\n print('Unknown entity type: {}'.format(e.name))", "def load_data(self):\n try:\n self.manager.load()\n except error:\n show_error_message(title='Initialization error!',\n message='File lords.sdb was not found!')\n else:\n self.update_widgets_values()", "def load_raw_data(apps, schema_editor):\n from season.import_raw_data import InitialDataProcessor\n matches_path = str(BASE_DIR) + '/season/migrations/matches.csv'\n deliveries_path = str(BASE_DIR) + '/season/migrations/deliveries.csv'\n # Initialization path to read data\n load_data = InitialDataProcessor(matches_path=matches_path, deliveries_path=deliveries_path)\n # transform data frame and save the data step by step\n # only support new season import for the first tym when data structure is ready to use\n load_data.transform_input_save()", "def initialize_project(self, force_import_from_game=False, with_window: ProjectWindow = None, first_time=False):\n\n yes_to_all = force_import_from_game\n\n for data_type in self.DATA_TYPES:\n yes_to_all = self.import_data_type(data_type, force_import_from_game, yes_to_all, with_window=with_window)\n\n if data_type == \"maps\" and first_time and self.maps is not None:\n archives_msb = self.maps.DukesArchives\n repeats = archives_msb.get_repeated_entity_ids()\n if {e.entity_id for e in repeats[\"Regions\"]} == {1702745, 1702746, 1702747, 1702748}:\n if self.offer_fix_broken_regions(with_window):\n self.save(\"maps\")\n if self.offer_translate_entities(with_window=with_window):\n self.save(\"maps\")\n self.offer_entities_export(with_window=with_window)\n\n if \"events\" in self.DATA_TYPES:\n self.import_events(force_import=yes_to_all, with_window=with_window)\n\n if \"talk\" in self.DATA_TYPES:\n self.import_talk(force_import=yes_to_all, with_window=with_window)", "def _collect_scene_data(self, config):\n\n self._config = config\n self.scenes_root_path = config['scenes_root_path']\n assert(os.path.isdir(self.scenes_root_path))\n\n self._scene_dict = dict()\n # each one is a list of scenes\n self._all_image_paths = {\"train\": [], \"test\": []}\n\n for key, val in self._all_image_paths.items():\n for scene_collection_name in config[key]:\n scene_collection_dir = os.path.join(self.scenes_root_path, scene_collection_name)\n assert os.path.isdir(scene_collection_dir), scene_collection_dir\n # Scan all scenes in this scene dir\n for scene_name in os.listdir(scene_collection_dir):\n full = os.path.join(scene_collection_dir, scene_name)\n if os.path.isdir(full):\n val += self._get_all_rgb_image_paths_in_scene_dir(full)", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)", "def loadData(catalog):\n\n loadArtwork(catalog)\n loadArtists(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def loadData(catalog):\n 
loadArtists(catalog)\n loadArtworks(catalog)", "def import_data_to_database(self, database_type, data):\n\n if database_type == \"render\":\n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n pointer.executemany(\"\"\"\n INSERT INTO render_information\n VALUES (?,?,?,?,?,?,?,?) \n \"\"\",\n (data)\n )\n connection.commit()\n connection.close()\n print(\"addet render information to database\")\n if database_type == \"object\":\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n pointer.executemany(\"\"\"\n INSERT INTO object_information\n VALUES (?,?,?,?,?,?,?,?,?,?,?,?) \n \"\"\",\n (data)\n )\n connection.commit()\n connection.close()\n print(\"addet objectinformation information to database\")\n if database_type == \"output\":\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n pointer.executemany(\"\"\"\n INSERT INTO output_information\n VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,\n ?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) \n \"\"\",\n (data)\n )\n connection.commit()\n connection.close()\n print(\"addet outputinformation information to database\")\n \n\n return", "def loadData():\n project_dir = \"/home/c/chandanchowdhury/Documents/CIS-833/CSSearch/indexer/\"\n\n index_file = \"index_file.pkl\"\n link_file = \"link_file.pkl\"\n\n index_data = loadPickle(project_dir+index_file)\n link_data = loadPickle(project_dir+link_file)\n\n return index_data, link_data", "def import_data(self):\n\n # Import ordered names of origins\n origins_file = os.path.join(self.data_directory,'origins.txt')\n self.origins = np.loadtxt(origins_file,dtype=str,ndmin=1)\n\n # Import ordered names of destinations\n destinations_file = os.path.join(self.data_directory,'destinations.txt')\n self.destinations = np.loadtxt(destinations_file,dtype=str,ndmin=1)\n\n # Import origin supply\n originsupply_file = os.path.join(self.data_directory,'origin_supply.txt')\n self.origin_supply = np.loadtxt(originsupply_file,ndmin=1).astype('float64')\n\n # In case origin supply is not a list\n if not isinstance(self.origin_supply,(np.ndarray, np.generic)):\n self.origin_supply = np.array([self.origin_supply])\n\n # Import destination demand\n destinationdemand_file = os.path.join(self.data_directory,'destination_demand.txt')\n self.destination_demand = np.loadtxt(destinationdemand_file,ndmin=1).astype('float64')\n\n # In case destination demand is not a list\n if not isinstance(self.destination_demand,(np.ndarray, np.generic)):\n self.destination_demand = np.array([self.destination_demand])\n\n # Import origin locations\n originlocations_file = os.path.join(self.data_directory,'origin_locations.txt')\n self.origin_locations = np.loadtxt(originlocations_file,ndmin=1)\n\n # Import destination locations\n destinationlocations_file = os.path.join(self.data_directory,'destination_locations.txt')\n self.destination_locations = np.loadtxt(destinationlocations_file,ndmin=1)\n\n # Import initial and final destination sizes\n initialdestinationsizes_file = os.path.join(self.data_directory,'initial_destination_sizes.txt')\n self.initial_destination_sizes = np.loadtxt(initialdestinationsizes_file,ndmin=1)\n\n # In case destination sizes are not a list\n if not isinstance(self.initial_destination_sizes,(np.ndarray, np.generic)):\n self.initial_destination_sizes = np.array([self.initial_destination_sizes])\n\n # Import N,M\n self.N = self.origin_supply.shape[0]\n self.M = self.initial_destination_sizes.shape[0]\n\n # Import cost 
matrix\n costmatrix_file = os.path.join(self.data_directory,'cost_matrix.txt')\n self.cost_matrix = np.loadtxt(costmatrix_file).astype('float64')\n\n # Reshape cost matrix if necessary\n if self.N == 1:\n self.cost_matrix = np.reshape(self.cost_matrix[:,np.newaxis],(self.N,self.M))\n if self.M == 1:\n self.cost_matrix = np.reshape(self.cost_matrix[np.newaxis,:],(self.N,self.M))\n\n # Compute total initial and final destination sizes\n self.total_initial_sizes = np.sum(self.initial_destination_sizes)\n\n # Compute naive total cost\n self.total_cost = 0\n for i in range(self.N):\n for j in range(self.M):\n self.total_cost += self.cost_matrix[i,j]*(self.origin_supply[i]/self.N)", "def _load_training_data(self):\n self._save_training_data()", "def load_static():\n\n for i, row in enumerate(open(\"seed_data/homepage_feature.static\")):\n row = row.rstrip()\n title, body, img_path_xs, img_path_sm, img_path_md, img_path_lg, is_active = row.split(\"|\")\n homepage_feature = HomepageFeatureModel(title=title,\n body=body,\n img_path_xs=img_path_xs,\n img_path_sm=img_path_sm,\n img_path_md=img_path_md,\n img_path_lg=img_path_lg,\n is_active=is_active)\n db.session.add(homepage_feature)\n\n for i, row in enumerate(open(\"seed_data/help_article.static\")):\n row = row.rstrip()\n title, description, body = row.split(\"|\")\n help_article = HelpArticleModel(title=title, \n description=description, \n body=body)\n db.session.add(help_article)\n\n db.session.commit()", "def load_data(self):\n raise NotImplementedError()", "def run_dataset(data: DataSetBase) -> None:\n\n tracks_manager = data.load_tracks_manager()\n reconstructions = data.load_reconstruction()\n\n all_shot_ids = set(tracks_manager.get_shot_ids())\n for r in reconstructions:\n for shot in r.shots.values():\n if shot.id in all_shot_ids:\n vertices, faces = mesh.triangle_mesh(shot.id, r, tracks_manager)\n shot.mesh.vertices = vertices\n shot.mesh.faces = faces\n\n data.save_reconstruction(\n reconstructions, filename=\"reconstruction.meshed.json\", minify=True\n )", "def load(self):\n canSave = self.canSave\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpack('4s3i',16,'REC_HEAD')\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #--LEVC?\n if name == 'LEVC':\n levc = Levc(name,size,delFlag,recFlag,ins,True)\n self.levcs[levc.id] = levc\n if canSave: self.records.append(levc)\n #print ' Added:',levc.id\n elif name == 'LEVI':\n levi = Levi(name,size,delFlag,recFlag,ins,True)\n self.levis[levi.id] = levi\n if canSave: self.records.append(levi)\n #print ' Added:',levi.id\n #--Other\n elif canSave:\n record = Record(name,size,delFlag,recFlag,ins)\n self.records.append(record)\n else:\n ins.seek(size,1,'Record')\n #--Done Reading\n ins.close()", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def importMasterSettings(self):\n\t\tpickleData = pickle.load( open( self.masterPath.path, \"rb\" ) )\n\t\tmaster = rlayer.RenderLayer( 'defaultRenderLayer' )\n\t\tmaster.makeCurrent()\n\t\tfor a in pickleData.keys():\n\t\t\ttry:\n\t\t\t\ta.v = pickleData[a]\n\t\t\texcept:\n\t\t\t\tcontinue", "def import_all():\n\n # count the number of files loaded\n count = 0\n\n # get model name\n model_name_list = [model for data_models in settings.OBJECT_DATA_MODELS\n 
for model in data_models]\n\n model_name_list += [model for model in settings.OTHER_DATA_MODELS]\n\n # import models one by one\n for model_name in model_name_list:\n import_model(model_name)\n\n # import localized strings\n import_localized_strings(settings.LANGUAGE_CODE)", "def import_data(self, file, import_type):\n if import_type == \"data\":\n collection = self.prog_logs\n elif import_type == \"log\":\n collection = self.monk_logs\n elif import_type == \"food\":\n collection = self.food_logs\n else:\n collection = \"\"\n logging.error(\"Invalid Type\")\n exit(1)\n try:\n fl = open(file)\n data = fl.readlines()\n for line in data:\n collection.insert_one(json.loads(line))\n except FileNotFoundError:\n logging.error(\"File Not Found\")\n exit(1)", "def _setup_scene(self):\n\n scene = bpy.context.scene\n\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n # remove non mesh objects\n for obj in scene.objects:\n obj.select = (obj.type != \"MESH\")\n bpy.ops.object.delete()\n\n # empty sequences are false by default\n if scene.objects:\n\n # unlink objects (all meshes) from parents\n bpy.ops.object.select_all()\n bpy.ops.object.parent_clear(type=\"CLEAR_KEEP_TRANSFORM\")\n\n # join all meshes in one single object\n scene.objects.active = bpy.data.objects[0]\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)\n bpy.context.object.name = \"Object\"\n bpy.context.object.dimensions = bpy.context.object.dimensions / max(bpy.context.object.dimensions)\n\n # set the origin of the object to the cursor location\n scene.cursor_location = [0, 0, 0]\n bpy.ops.object.origin_set(type=\"ORIGIN_CURSOR\")\n # bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\", center=\"BOUNDS\")\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n\n if self.add_ground_plane:\n bpy.ops.mesh.primitive_plane_add(radius=10.)\n\n bpy.ops.object.select_all(action=\"DESELECT\")", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def loadData(catalog):\r\n controller.loadData(catalog)", "def afterLoadSceneObject(self):\n\t\tpass", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)", "def import_data(self, data):\n # Import additional data for tuning\n # data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'\n pass", "def load_data(self):\n # Get Paths Setup\n self.dir = path.dirname(__file__)\n self.img_dir = path.join(self.dir, 'img')\n self.snd_dir = path.join(self.dir, 'snd')\n\n # Load High Score\n try:\n with open(path.join(self.dir, HIGH_SCORE_FILE), 'r') as f:\n self.highscore = int(f.read())\n except FileNotFoundError:\n self.highscore = 0\n\n # Load Images / Background\n self.player_image = pg.image.load(path.join(self.img_dir, PLAYER_FILE))\n self.planet_images = []\n for i in range(1, 11):\n self.planet_images.append(pg.image.load(path.join(self.img_dir, 'planets', 'p{}shaded.png'.format(i)))\n .convert())\n self.moon_images = []\n for i in range(1, 4):\n self.moon_images.append(pg.image.load(path.join(self.img_dir, 'moons', 'Moon{}.png'.format(i))).convert())\n self.sun_image = pg.image.load(path.join(self.img_dir, SUN_FILE)).convert()\n self.fuel_image = pg.image.load(path.join(self.img_dir, 'pickups', FUEL_FILE)).convert()\n self.arrow_image = pg.image.load(path.join(self.img_dir, ARROW_FILE)).convert()\n # LOAD BACKGROUNDS\n self.background = pg.image.load(path.join(self.img_dir, BACKGROUND_FILE)).convert()\n self.background_rect = 
self.background.get_rect()\n print(f'BACKGROUND WIDTH, HEIGHT: {self.background_rect.width}, {self.background_rect.height}')\n self.loadscreen = pg.image.load(path.join(self.img_dir, START_SCREEN_FILE)).convert()\n self.loadscreen_rect = self.loadscreen.get_rect()\n\n # BUILDING EXPLOSION ANIMATIONS\n self.explosion_animation = {'lg': [], 'sm': [], 'tiny': [], 'player': []}\n for i in range(0, 9):\n filename = 'tank_explosion{}.png'.format(i)\n img = pg.image.load(path.join(self.img_dir, 'explosions', filename)).convert()\n img.set_colorkey(BLACK)\n img_lg = pg.transform.scale(img, (50, 50))\n self.explosion_animation['lg'].append(img_lg)\n img_sm = pg.transform.scale(img, (10, 10))\n self.explosion_animation['sm'].append(img_sm)\n img_tiny = pg.transform.scale(img, (6, 6))\n self.explosion_animation['tiny'].append(img_tiny)\n filename = 'sonicExplosion0{}.png'.format(i)\n img_pl = pg.image.load(path.join(self.img_dir, 'explosions', filename)).convert()\n img = pg.transform.scale(img_pl, (80, 80))\n self.explosion_animation['player'].append(img)\n\n # Load Sounds / Music\n self.crash_sound = pg.mixer.Sound(path.join(self.snd_dir, CRASH_SND_FILE))\n self.crash_sound.set_volume(.4)\n self.moon_crash_sound = pg.mixer.Sound(path.join(self.snd_dir, MOON_CRASH_SND_FILE))\n self.moon_crash_sound.set_volume(.03)\n self.player_crash_sound = pg.mixer.Sound(path.join(self.snd_dir, PLAYER_CRASH_SND_FILE))\n self.player_crash_sound.set_volume(.9)\n self.launch_sound = pg.mixer.Sound(path.join(self.snd_dir, JUMP_SND_FILE))\n self.launch_sound.set_volume(.8)\n self.jump_sector_sound = pg.mixer.Sound(path.join(self.snd_dir, JUMP_SECTOR_SND_FILE))\n self.jump_sector_sound.set_volume(1)\n self.jetpack_sound = pg.mixer.Sound(path.join(self.snd_dir, JETPACK_SND_FILE))\n self.jetpack_sound.set_volume(.3)", "def import_data(self):\n\t\tif not self.log_files or len(self.log_files) ==0:\n\t\t\tprint \"There is no log files need to import into database\"\n\t\telse:\n\t\t\tfor log_file in self.log_files:\n\t\t\t\tdata = self.read_file(log_file)\n\t\t\t\tself.conn.insert(data)", "def load_data():\n\tscores = pd.read_csv('../data/user_assessment_scores.csv')\n\tviews = pd.read_csv('../data/user_course_views.csv')\n\ttags = pd.read_csv('../data/course_tags.csv')\n\tinterests = pd.read_csv('../data/user_interests.csv')\n\n\tdb_file = '../db/usersim.sqlite'\n\ttry:\n\t\tengine = sqlite3.connect(db_file, timeout=10)\n\t\tscores.to_sql('scores', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\tviews.to_sql('views', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\ttags.to_sql('tags', engine, if_exists='replace', index=False, index_label='course_id')\n\t\tinterests.to_sql('interests', engine, if_exists='replace', index=False, index_label='user_handle')\n\texcept:\n\t\tprint('Error occured while inserting into database')\n\tfinally:\n\t\tif engine:\n\t\t\tengine.close()\n\treturn scores, views, tags, interests", "def load(self):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"load {}\".format(item))\n item.load()", "def fill_import_section():\n section = _SectionData(\"Import\")\n section.props.append((\"ImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_scale)))\n section.props.append((\"PreservePathForExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_preserve_path_for_export))))\n section.props.append((\"ImportPimFile\", 
int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pim_file))))\n section.props.append((\"UseWelding\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_welding))))\n section.props.append((\"WeldingPrecision\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_welding_precision))))\n section.props.append((\"UseNormals\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_normals))))\n section.props.append((\"ImportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pit_file))))\n section.props.append((\"LoadTextures\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_load_textures))))\n section.props.append((\"ImportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pic_file))))\n section.props.append((\"ImportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pip_file))))\n section.props.append((\"ImportPisFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pis_file))))\n section.props.append((\"ConnectedBones\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_connected_bones))))\n section.props.append((\"BoneImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_bone_scale)))\n section.props.append((\"ImportPiaFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pia_file))))\n section.props.append((\"IncludeSubdirsForPia\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_include_subdirs_for_pia))))\n return section", "def _import(self, datadict):\n self.GUID = datadict.get(\"GUID\", uuid.uuid1())\n self.FileName = datadict.get(\"FileName\", \"\")\n self.Name = datadict.get(\"Name\", \"\")\n self.Projects = datadict.get(\"Projects\", [])\n self.VSVersion = datadict.get(\"VSVersion\", None)", "def load_inputs():\n\n print \"Daily inputs\"\n\n Daily_Input.query.delete()\n\n\n for row in open(\"seed_data/u.input.txt\"):\n row = row.rstrip()\n input_id, date, user_id, sleep, exercise, screen_time, well_being_rating = row.split(\"|\")\n\n date = datetime.strptime(date, \"%m-%d-%y\")\n \n daily_input = Daily_Input(input_id=input_id, date=date, user_id=user_id, sleep=sleep, exercise=exercise, screen_time=screen_time, well_being_rating=well_being_rating)\n db.session.add(daily_input)\n\n db.session.commit()", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def load_database(self, main_class):\n main_class.database.delete_all(\"render\")\n main_class.database.delete_all(\"object\")\n #main_class.database.delete_all(\"output\")\n render_csv = os.path.join(self.filepath, \"Render_data.csv\")\n object_csv = os.path.join(self.filepath, \"Obj_data.csv\")\n main_class.database.import_excel(render_csv, \"render\")\n main_class.database.import_excel(object_csv, \"object\")\n\n render_dic=main_class.database.get_database_dict(\"render\")\n\n main_class.render_database = main_class.database.get_data_from_database(\"render\")\n main_class.object_database = main_class.database.get_data_from_database(\"object\")\n\n main_class.background_picture_list = main_class.database.get_background_pictures_names()\n main_class.packaging_picture_list = main_class.database.get_bubble_wrap_pictures_names()\n\n main_class.camera_settings.append([0, 0, 0, 
0, 100])\n for obj in main_class.render_database:\n \"\"\"\n extracting Camerasetting from Database and set all important angles and distances\n \"\"\"\n if obj[render_dic[\"object_type\"]] == \"camera\":\n for i in range(0, int(obj[render_dic[\"polar_angle_segments\"]])):\n for j in range(0, int(obj[render_dic[\"azimuth_angle_segments\"]])):\n pol_min = obj[render_dic[\"polar_angle_min\"]]\n pol_max = obj[render_dic[\"polar_anglel_max\"]]\n pol_segments= obj[render_dic[\"polar_angle_segments\"]]\n pol_random=obj[render_dic[\"polar_angle_random_rad\"]]\n try:\n pol_min = float( pol_min.replace(',','.'))\n except:\n pass\n try:\n pol_max = float( pol_max.replace(',','.'))\n except:\n pass\n try:\n pol_segments = float( pol_segments.replace(',','.'))\n except:\n pass\n try:\n pol_random = float( pol_random.replace(',','.'))\n except:\n pass\n polar_angle = (pol_min + ((pol_max - pol_min)/(pol_segments))*i)\n\n azi_min = obj[render_dic[\"azimuth_angle_min\"]]\n azi_max = obj[render_dic[\"azimuth_angle_max\"]]\n azi_segments= obj[render_dic[\"azimuth_angle_segments\"]]\n azi_random= obj[render_dic[\"azimuth_angle_random_rad\"]]\n\n try:\n azi_min = float( azi_min.replace(',','.'))\n except:\n pass\n try:\n azi_max = float( azi_max.replace(',','.'))\n except:\n pass\n try:\n azi_segments = float( azi_segments.replace(',','.'))\n except:\n pass\n try:\n azi_random = float( azi_random.replace(',','.'))\n except:\n pass\n azimuth_angle = (azi_min + ((azi_max - azi_min)/(azi_segments))*j)\n\n position=[polar_angle, pol_random, azimuth_angle, azi_random, obj[render_dic[\"radius\"]] ]\n print(\"camera position added: \",position)\n main_class.camera_settings.append(position)\n \n if obj[render_dic[\"object_type\"]]==\"light\":\n\n if obj[render_dic[\"name\"]]==\"SUN\":\n radius= obj[render_dic[\"radius\"]]\n try:\n radius = float( radius.replace(',','.'))\n except:\n pass\n light_obj=[ obj[render_dic[\"name\"]] , [0,0, radius ] ]\n main_class.light_settings.append(light_obj)\n print(\"sun added to list\")\n\n if obj[render_dic[\"name\"]]==\"SPOT\":\n for i in range(0, int(obj[render_dic[\"polar_angle_segments\"]])):\n for j in range(0, int(obj[render_dic[\"azimuth_angle_segments\"]])):\n pol_min = obj[render_dic[\"polar_angle_min\"]]\n pol_max = obj[render_dic[\"polar_anglel_max\"]]\n pol_segments= obj[render_dic[\"polar_angle_segments\"]]\n pol_random=obj[render_dic[\"polar_angle_random_rad\"]]\n try:\n pol_min = float( pol_min.replace(',','.'))\n except:\n pass\n try:\n pol_max = float( pol_max.replace(',','.'))\n except:\n pass\n try:\n pol_segments = float( pol_segments.replace(',','.'))\n except:\n pass\n try:\n pol_random = float( pol_random.replace(',','.'))\n except:\n pass\n polar_angle = (pol_min + ((pol_max - pol_min)/(pol_segments))*i)\n\n azi_min = obj[render_dic[\"azimuth_angle_min\"]]\n azi_max = obj[render_dic[\"azimuth_angle_max\"]]\n azi_segments= obj[render_dic[\"azimuth_angle_segments\"]]\n azi_random= obj[render_dic[\"azimuth_angle_random_rad\"]]\n try:\n azi_min = float( azi_min.replace(',','.'))\n except:\n pass\n try:\n azi_max = float( azi_max.replace(',','.'))\n except:\n pass\n try:\n azi_segments = float( azi_segments.replace(',','.'))\n except:\n pass\n try:\n azi_random = float( azi_random.replace(',','.'))\n except:\n pass\n azimuth_angle = (azi_min + ((azi_max - azi_min)/(azi_segments))*j)\n position=[polar_angle, pol_random, azimuth_angle, azi_random, obj[render_dic[\"radius\"]] ]\n light_obj=[ obj[render_dic[\"name\"]] , position, 
obj[render_dic[\"tracking_obj\"]],1000 ]\n print(\"added light_obj: \", light_obj)\n main_class.light_settings.append(light_obj)\n main_class.max_loop_count=len(main_class.camera_settings)*len(main_class.light_settings)\n print(\"loop count is:\", main_class.max_loop_count)\n return", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def _import(self, __button):\r\n# WARNING: Refactor _import; current McCabe Complexity metric = 18.\r\n Widgets.set_cursor(self.modulebook.mdcRTK, gtk.gdk.WATCH)\r\n\r\n _import_errors = 0\r\n self._import_log.info('The following records could not be imported to '\r\n 'the open RTK database:\\n')\r\n\r\n # Find the number of existing incidents.\r\n if Configuration.BACKEND == 'mysql':\r\n _query = \"SELECT COUNT(*) FROM rtk_incident\"\r\n elif Configuration.BACKEND == 'sqlite3':\r\n _query = \"SELECT COALESCE(MAX(fld_incident_id)+1, 0) \\\r\n FROM rtk_incident\"\r\n (_num_incidents, _error_code, __) = self._dao.execute(_query,\r\n commit=False)\r\n for i in range(len(self._file_contents) - 1):\r\n _contents = []\r\n\r\n for j in range(len(self._file_index)):\r\n if self._file_index[j] == -1:\r\n _contents.append('')\r\n else:\r\n try:\r\n _contents.append(\r\n self._file_contents[i][self._file_index[j]])\r\n except IndexError:\r\n _contents.append('')\r\n\r\n _contents[14] = _contents[14].replace('$', '')\r\n\r\n # Remove any single and double quotes from the description and\r\n # remarks fields.\r\n for j in[4, 5, 8]:\r\n _contents[j] = _contents[j].replace('\\'', '')\r\n _contents[j] = _contents[j].replace('\\\"', '')\r\n\r\n # Remove any commas that may be in numerical fields.\r\n for j in [12, 14, 15]:\r\n _contents[j] = _contents[j].replace(',', '')\r\n\r\n # Convert all the date fields to ordinal dates.\r\n for j in [19, 22, 25, 28]:\r\n _contents[j] = Utilities.date_to_ordinal(_contents[j])\r\n\r\n # Convert missing values to correct default value.\r\n for j in [0, 1, 2, 3, 6, 7, 13, 15, 18, 20, 21, 23, 24, 26, 27,\r\n 29, 31, 32, 35, 36, 37, 38, 39]:\r\n try:\r\n _contents[j] = Utilities.missing_to_default(\r\n int(_contents[j]), 0)\r\n except ValueError:\r\n _contents[j] = 0\r\n\r\n for j in [16, 17]:\r\n try:\r\n _contents[j] = Utilities.missing_to_default(\r\n int(_contents[j]), -1)\r\n except ValueError:\r\n _contents[j] = -1\r\n\r\n for j in [12, 14, 33]:\r\n try:\r\n _contents[j] = Utilities.missing_to_default(\r\n float(_contents[j]), 0.0)\r\n except ValueError:\r\n _contents[j] = 0.0\r\n\r\n for j in [9, 34]:\r\n try:\r\n _contents[j] = Utilities.missing_to_default(\r\n int(_contents[j]), 1)\r\n except ValueError:\r\n _contents[j] = 1\r\n\r\n if _contents[1] == 0 or _contents[1] is None or _contents[1] == '':\r\n _contents[1] = _num_incidents[0][0] + i + 1\r\n\r\n _query = \"INSERT INTO rtk_incident \\\r\n (fld_revision_id, fld_incident_id, \\\r\n fld_incident_category, fld_incident_type, \\\r\n fld_short_description, fld_long_description, \\\r\n fld_criticality, fld_detection_method, fld_remarks, \\\r\n fld_status, fld_test_found, fld_test_case, \\\r\n fld_execution_time, fld_unit, fld_cost, \\\r\n fld_incident_age, fld_hardware_id, fld_sftwr_id, \\\r\n fld_request_by, fld_request_date, fld_reviewed, \\\r\n fld_reviewed_by, fld_reviewed_date, fld_approved, \\\r\n fld_approved_by, fld_approved_date, fld_complete, \\\r\n fld_complete_by, fld_complete_date, fld_life_cycle, \\\r\n fld_analysis, fld_accepted) \\\r\n VALUES ({0:d}, {1:d}, {2:d}, {3:d}, '{4:s}', '{5:s}', \\\r\n {6:d}, {7:d}, '{8:s}', {9:d}, '{10:s}', 
\\\r\n '{11:s}', {12:f}, {13:d}, {14:f}, {15:d}, \\\r\n {16:d}, {17:d}, {18:d}, {19:d}, {20:d}, \\\r\n {21:d}, {22:d}, {23:d}, {24:d}, {25:d}, \\\r\n {26:d}, {27:d}, {28:d}, {29:d}, '{30:s}', \\\r\n {31:d})\".format(_contents[0], _contents[1],\r\n _contents[2], _contents[3],\r\n _contents[4], _contents[5],\r\n _contents[6], _contents[7],\r\n _contents[8], _contents[9],\r\n _contents[10], _contents[11],\r\n _contents[12], _contents[13],\r\n _contents[14], _contents[15],\r\n _contents[16], _contents[17],\r\n _contents[18], _contents[19],\r\n _contents[20], _contents[21],\r\n _contents[22], _contents[23],\r\n _contents[24], _contents[25],\r\n _contents[26], _contents[27],\r\n _contents[28], _contents[29],\r\n _contents[30], _contents[31])\r\n (_results,\r\n _error_code, __) = self._dao.execute(_query, commit=True)\r\n\r\n if _error_code == 0:\r\n _query = \"INSERT INTO rtk_incident_detail \\\r\n (fld_incident_id, fld_component_id, \\\r\n fld_age_at_incident, fld_failure, fld_suspension, \\\r\n fld_cnd_nff, fld_occ_fault, \\\r\n fld_initial_installation, fld_interval_censored) \\\r\n VALUES ({0:d}, {1:d}, {2:f}, {3:d}, \\\r\n {4:d}, {5:d}, {6:d}, {7:d}, \\\r\n {8:d})\".format(_contents[1], _contents[32],\r\n _contents[33], _contents[34],\r\n _contents[35], _contents[36],\r\n _contents[37], _contents[38],\r\n _contents[39])\r\n (_results,\r\n _error_code, __) = self._dao.execute(_query, commit=True)\r\n else:\r\n self._import_log.info('{0:d} - {1:s}'.format(_contents[1],\r\n _contents[4]))\r\n _import_errors += 1\r\n\r\n if _import_errors > 0:\r\n Widgets.rtk_information(_(u\"Error importing {0:d} program \"\r\n u\"incidents. Refer to the import log \"\r\n u\"{1:s} for more details.\").format(\r\n _import_errors, self._import_log))\r\n\r\n Widgets.set_cursor(self.modulebook.mdcRTK, gtk.gdk.LEFT_PTR)\r\n\r\n # Reload the Incident class gtk.TreeView().\r\n self._modulebook.request_load_data(self._dao, self._revision_id)\r\n\r\n return False", "def loadParts(self):\n for i in range(15):\n self.model_parts[i] = loadModel(\"ato_{}.pkl\".format(str(i)))", "def load(self, *args, **kwargs):\n pass", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def loadData(catalog):\n loadVideos(catalog)\n loadCategories(catalog)", "def read_layout(outFile=None, linked=False, append=False):\n from cgl.plugins.blender.lumbermill import scene_object, LumberObject, import_file\n from cgl.core.utils.read_write import load_json\n import bpy\n\n if outFile == None:\n outFileObject = scene_object().copy(ext='json', task='lay', user='publish').latest_version()\n outFileObject.set_attr(filename='%s_%s_%s.%s' % (outFileObject.seq,\n outFileObject.shot,\n outFileObject.task,\n 'json'\n ))\n outFile = outFileObject.path_root\n # outFile = scene_object().path_root.replace(scene_object().ext, 'json')\n\n\n\n data = load_json(outFile)\n\n for p in data:\n print(p)\n data_path = data[p]['source_path']\n blender_transform = data[p]['blender_transform']\n\n transform_data = []\n for value in blender_transform:\n transform_data.append(value)\n\n print(transform_data)\n\n pathToFile = os.path.join(scene_object().root, data_path)\n lumberObject = LumberObject(pathToFile)\n\n\n\n if lumberObject.filename in bpy.data.libraries:\n lib = bpy.data.libraries[lumberObject.filename]\n bpy.data.batch_remove(ids=([lib]))\n 
import_file(lumberObject.path_root, linked=linked, append=append)\n else:\n import_file(lumberObject.path_root, linked=linked, append=append)\n\n if p not in bpy.context.collection.objects:\n obj = bpy.data.objects.new(p, None)\n bpy.context.collection.objects.link(obj)\n obj.instance_type = 'COLLECTION'\n obj.instance_collection = bpy.data.collections[lumberObject.asset]\n obj.location = (transform_data[0], transform_data[1], transform_data[2])\n obj.rotation_euler = (transform_data[3], transform_data[4], transform_data[5])\n obj.scale = (transform_data[6], transform_data[7], transform_data[8])\n\n bpy.ops.file.make_paths_relative()", "def load_data(catalog):\n controller.load_data(catalog)", "def load_raw_data(self, input_files):\n\n log.debug(f\"Loading dataset {input_files}\") \n print(f\"Loading dataset\")\n\n # Load stroke information from XML files\n for file in input_files:\n new_strokeset = strokeset.StrokeSet(file)\n self.strokesets.append(new_strokeset)\n self.stroke_matrix.append(new_strokeset.as_delta_array())\n self.stroke_ascii.append(new_strokeset.get_text())\n\n done_msg = \"Finished parsing dataset. Imported {} lines\".format(len(self.get_strokesets()))\n print (done_msg)\n log.info(done_msg)", "def ingest():\n db.delete_dataset_records(DATASET_ID)\n\n db.insert_dataset({\n 'dataset_id': DATASET_ID,\n 'title': 'North American Breeding Bird Survey (BBS)',\n 'version': '2016.0',\n 'url': 'https://www.pwrc.usgs.gov/bbs/'})\n\n to_taxon_id = insert_taxa()\n to_place_id = insert_places()\n to_event_id = insert_events(to_place_id)\n insert_counts(to_event_id, to_taxon_id)", "def _load_test_data(self):\n self._save_test_data()", "def loadData(self, data):\n\n #Grab the guide settings in case we want to use them here (and are not stored in data arg)\n existing_data = self.saveData()\n existing_data.update(data)\n data = existing_data\n\n super(OSSMouthGuide, self).loadData( data )\n\n self.loadAllObjectData(data, \"Control\")\n self.loadAllObjectData(data, \"Transform\")\n\n\n return True", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)\n loadAdquires(catalog)\n loadNacionalities(catalog)\n load2DArtworks(catalog)\n loadArtistMediumsTags(catalog)\n loadDptments(catalog)\n catalog['artists'] = sortArtists(catalog, 3)\n fillArtistMediums(catalog)\n fillMostUsedMediums(catalog)\n catalog['artists_tags'] = sortArtistTags(catalog, 3)\n sort_dptments(catalog)", "def import_file(self):\n self.inputdata = json.load(self.infile)\n self.outputdata = self.inputdata\n self.logger.info('Json file Loaded')\n self.logger.debug(u'JSON:{d}'.format(d=self.inputdata))", "def loadData(self,ins):\n raise AbstractError", "def imported(self, session, task):\n drop_feat = self.config['drop'].get(bool)\n\n for item in task.imported_items():\n self.ft_in_title(item, drop_feat)\n item.store()", "def importFolder(self, path, unify=True):\n self.fnames = [f for f in os.listdir(path) if f.endswith('.stl')]\n self.shapes = [AmpObject(os.path.join(path, f), 'limb', unify=unify) for f in self.fnames]\n for s in self.shapes:\n s.lp_smooth(3, brim=True)", "def importer():\n pass", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = 
volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! 
Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def init_data():\n ps = Planete_Solidaire(name=\"Planète Solidaire\").save()\n\n maya = Human4j(\n firstname='Maya',\n lastname='Hannachi',\n number='0620902819',\n email='maya.hannachi@epita.fr',\n school='EPITA'\n ).save()\n mailinh = Human4j(\n firstname='Mai-Linh',\n lastname='Lannes',\n number='0612632032',\n email='mai-linh.lannes@epita.fr',\n school='EPITA'\n ).save()\n michel = Human4j(\n firstname='Michel',\n lastname='Sasson',\n number='0662739612',\n email='michel.sasson@epita.fr',\n school='EPITA'\n ).save()\n cedric = Human4j(\n firstname='Cédric',\n lastname='Joly',\n number='',\n email='cedric.joly@epita.fr',\n school='EPITA'\n ).save()\n caroline = Human4j(\n firstname='Caroline',\n lastname='De Paoli',\n number='',\n email='caroline.depaoli@isg.fr',\n school='ISG'\n ).save()\n\n binome = Binome4j().save()\n binome.human1.connect(mailinh)\n binome.human2.connect(maya)\n\n ps.binome.connect(binome)\n ps.cedric.connect(cedric)\n ps.michel.connect(michel)\n ps.caroline.connect(caroline)", "def _load( self, i ):\n if ir.config.verbosity_level >= 2: print(\"[observation] Lazy loading raster\")\n self._raster_data[i] = raster_cube( self._raster_files, line=self._line_info['description'][i], keep_null=self._keep_null )", "def object_import(request, simulation, object_name):\n try:\n if object_name == 'function':\n parent = simulation.scenario.supply.functionset\n else:\n parent = simulation.scenario.supply.network\n query = get_query(object_name, simulation)\n user_id_set = set(query.values_list('user_id', flat=True))\n if object_name == 'link':\n # To import links, we retrieve the user ids of all centroids, crossings\n # and functions and we build mappings between ids and objects.\n centroids = get_query('centroid', simulation)\n centroid_ids = set(centroids.values_list('user_id', flat=True))\n crossings = get_query('crossing', simulation)\n crossing_ids = set(crossings.values_list('user_id', flat=True))\n node_ids = centroid_ids.union(crossing_ids)\n # Mapping between the user id and the id of the nodes.\n node_mapping = dict()\n for centroid in centroids:\n node_mapping[centroid.user_id] = centroid.id\n for crossing in crossings:\n node_mapping[crossing.user_id] = crossing.id\n functions = get_query('function', simulation)\n function_ids = set(functions.values_list('user_id', flat=True))\n # Mapping between the user id and the id of the functions.\n function_id_mapping = dict()\n # Mapping between the user id and the instance of the functions\n function_mapping = dict()\n for function in functions:\n function_id_mapping[function.user_id] = function.id\n function_mapping[function.user_id] = function\n # Convert imported file to a csv DictReader.\n encoded_file = request.FILES['import_file']\n tsv_file = StringIO(encoded_file.read().decode())\n reader = csv.DictReader(tsv_file, delimiter='\\t')\n to_be_updated = set()\n to_be_created = list()\n # Store the user_id of the imported instance to avoid two instances\n # with the same id.\n imported_ids = set()\n if object_name == 'centroid':\n # Do not import centroid with same id as a crossing.\n crossings = get_query('crossing', simulation)\n imported_ids = set(crossings.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Centroid(user_id=id, name=row['name'],\n 
x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'crossing':\n # Do not import crossing with same id as a centroid.\n centroids = get_query('centroid', simulation)\n imported_ids = set(centroids.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Crossing(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'function':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], row['expression'])\n )\n else:\n to_be_created.append(\n Function(user_id=id, name=row['name'],\n expression=row['expression'])\n )\n elif object_name == 'link':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'],\n node_mapping[int(row['origin'])],\n node_mapping[int(row['destination'])],\n function_id_mapping[int(row['function'])],\n float(row['lanes']), float(row['length']),\n float(row['speed']), float(row['capacity']))\n )\n else:\n if int(row['origin']) in node_ids \\\n and int(row['destination']) in node_ids \\\n and int(row['function']) in function_ids:\n # Ignore the links with unidentified origin,\n # destination or function.\n to_be_created.append(\n Link(user_id=id, name=row['name'],\n origin=node_mapping[int(row['origin'])],\n destination=node_mapping[int(row['destination'])],\n vdf=function_mapping[int(row['function'])],\n lanes=float(row['lanes']),\n length=float(row['length']),\n speed=float(row['speed']),\n capacity=float(row['capacity']))\n )\n if to_be_updated:\n if object_name in ('centroid', 'crossing'):\n values = set(query.values_list('user_id', 'name', 'x', 'y'))\n elif object_name == 'function':\n values = set(query.values_list('user_id', 'name', 'expression'))\n elif object_name == 'link':\n values = set(query.values_list('user_id', 'name', 'origin',\n 'destination', 'vdf_id', 'lanes',\n 'length', 'speed', 'capacity'))\n # Find the instances that really need to be updated (the values have\n # changed).\n to_be_updated = to_be_updated.difference(values)\n if object_name in ('centroid', 'crossing', 'function'):\n # Update the objects (it would be faster to delete and re-create\n # them but this would require to also change the foreign keys of\n # the links).\n for values in to_be_updated:\n # Index 0 of values is the id column i.e. 
the user_id.\n instance = query.filter(user_id=values[0])\n if object_name in ('centroid', 'crossing'):\n instance.update(name=values[1], x=values[2], y=values[3])\n else: # Function\n instance.update(name=values[1], expression=values[2])\n elif object_name == 'link':\n # Delete the links and re-create them.\n ids = list(query.values_list('id', 'user_id'))\n # Create a mapping between the user ids and the ids.\n id_mapping = dict()\n for i in range(len(values)):\n id_mapping[ids[i][1]] = ids[i][0]\n # Retrieve the ids of the links to be updated with the mapping and\n # delete them.\n to_be_updated_ids = [id_mapping[values[0]]\n for values in to_be_updated]\n with connection.cursor() as cursor:\n chunk_size = 20000\n chunks = [\n to_be_updated_ids[x:x + chunk_size]\n for x in range(0, len(to_be_updated_ids), chunk_size)\n ]\n for chunk in chunks:\n # Delete the relations first.\n cursor.execute(\n \"DELETE FROM Network_Link \"\n \"WHERE link_id IN %s;\",\n [chunk]\n )\n cursor.execute(\n \"DELETE FROM Link \"\n \"WHERE id IN %s;\",\n [chunk]\n )\n # Create a mapping between the id and the instance of the\n # functions.\n function_mapping = dict()\n for function in functions:\n function_mapping[function.id] = function\n # Now, create the updated instances with the new values.\n to_be_created += [\n Link(user_id=values[0], name=values[1], origin=values[2],\n destination=values[3], vdf=function_mapping[values[4]],\n lanes=values[5], length=values[6], speed=values[7],\n capacity=values[8])\n for values in to_be_updated\n ]\n # Create the new objects in bulk.\n # The chunk size is limited by the MySQL engine (timeout if it is too big).\n chunk_size = 10000\n chunks = [to_be_created[x:x + chunk_size]\n for x in range(0, len(to_be_created), chunk_size)]\n # Remove the orphan instances.\n if object_name == 'function':\n query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all()) \\\n .delete()\n else:\n query.model.objects.exclude(network__in=Network.objects.all()).delete()\n for chunk in chunks:\n # Create the new instances.\n query.model.objects.bulk_create(chunk, chunk_size)\n # Retrieve the newly created instances and add the many-to-many\n # relation.\n # Add the many-to-many relation.\n if object_name == 'function':\n new_instances = query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all())\n for instance in new_instances:\n instance.functionset.add(parent)\n else:\n new_instances = query.model.objects \\\n .exclude(network__in=Network.objects.all())\n for instance in new_instances:\n instance.network.add(parent)\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(\n reverse('metro:object_list', args=(simulation.id, object_name,))\n )\n except Exception as e:\n print(e)\n context = {\n 'simulation': simulation,\n 'object': object_name,\n }\n return render(request, 'metro_app/import_error.html', context)", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexHistory._loadData(self, data)", "def __init__(self, meta: SceneDescription):\n super().__init__(meta)\n self.scenes = []\n self.nodes = []\n self.meshes = []\n self.materials = []\n self.images = []\n self.samplers = []\n self.textures = []\n\n self.path = None\n self.scene = None\n self.gltf = None", "def loadData(self):\r\n self.samplerate = self.app.samplerate\r\n self.sensors = self.app.sensors\r\n self.sensorMask = self.app.sensorMask\r\n self.measurements = self.app.measurements\r\n\r\n # Get min and max data points\r\n for sens in self.sensor_ids:\r\n 
try:\r\n for i in range(1,self.measurements):\r\n if float(self.app.data[i][sens].text) < self.sensor_range[0]:\r\n self.sensor_range[0] = float(self.app.data[i][sens].text)\r\n elif float(self.app.data[i][sens].text) > self.sensor_range[1]:\r\n self.sensor_range[1] = float(self.app.data[i][sens].text)\r\n except:\r\n print(self.app.data)\r\n \r\n # Set x scale from 0 to end of track\r\n self.scalex = [0,self.measurements]\r\n## self.scalex = [0,self.w/2]\r\n # Set y scale to maximum sensor measurement\r\n self.setScaleY(self.sensor_range[0], self.sensor_range[1])", "def load(self):\n logger.info(\"Loading File!!!!!!!!!!!\")\n file,types = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file',\n BASE_DIR,\"Template Files(*.json)\") #creates file dialog\n with open(file) as template_json:\n data = json.load(template_json) #json template data\n logger.debug(data)\n for tab in data[\"tabs\"]:\n #create new tab for each specified in data\n tabInfo = data[\"tabs\"][tab]\n newTab =self.tabwidget.newTab(tabInfo[\"name\"], image = tabInfo[\"image\"]) #make tab\n for btn in tabInfo[\"buttons\"]: #make buttons in each tab\n btnInfo = tabInfo[\"buttons\"][btn]\n newbtn = self.newDragBtn(btnInfo[\"color\"], btnInfo[\"connections\"],btnInfo[\"name\"], newTab, btnInfo[\"width\"], btnInfo[\"height\"],newTab)\n newbtn.move(btnInfo[\"x\"],btnInfo[\"y\"]) #move button to location on screen" ]
[ "0.6907678", "0.67906314", "0.6576967", "0.6550817", "0.65185374", "0.62975734", "0.6248734", "0.62315434", "0.6214042", "0.6175726", "0.61386037", "0.61351347", "0.61297685", "0.59827185", "0.5970255", "0.5943906", "0.5901872", "0.589308", "0.58869416", "0.5873126", "0.58356416", "0.58251554", "0.5808933", "0.58057123", "0.57714754", "0.5755034", "0.5724621", "0.5711709", "0.5711709", "0.5711709", "0.5711709", "0.5711281", "0.57107323", "0.57090926", "0.5685074", "0.5678746", "0.5674334", "0.5663441", "0.5660454", "0.5654838", "0.56471324", "0.56454927", "0.56454927", "0.56431514", "0.56420624", "0.56367815", "0.56319624", "0.5625988", "0.56231797", "0.562213", "0.56197965", "0.56172246", "0.56143564", "0.5603383", "0.5593595", "0.5581053", "0.5577286", "0.5577023", "0.55764085", "0.55660754", "0.55476654", "0.5541565", "0.5541556", "0.55403346", "0.5522976", "0.5518961", "0.55029875", "0.5497115", "0.5496636", "0.5496636", "0.5496636", "0.5496636", "0.5496636", "0.5495994", "0.54934764", "0.54934704", "0.54928243", "0.54924566", "0.5492373", "0.5465608", "0.5462009", "0.54578435", "0.54567456", "0.5453955", "0.5453706", "0.5441685", "0.5441274", "0.5434964", "0.54296315", "0.5427791", "0.5424393", "0.5419039", "0.5417773", "0.5416658", "0.5411709", "0.54068446", "0.5399159", "0.53947335", "0.5387975", "0.5380492" ]
0.5926135
16
import lights in scene
def importLights(self, asset = '', searchAndReplace = ['',''] ):
	if self.lightPath.exists:
		self.lightPath.imp()
	if self.lightLinkPath.exists:
		self.importLightLinking( asset, searchAndReplace )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)", "def flicker_lights(self):\n print 'Lights Set'", "async def lights(self, context):\n\n await random_image(context, 'lights')", "def InitLightBasic(self):\r\n\t\t\r\n\t\taLight = AmbientLight(\"AmbientLight\")\r\n\t\taLight.setColor(Vec4(0.3, 0.3, 0.3, 1))\r\n\t\trender.setLight(render.attachNewNode(aLight))\r\n\t\r\n\t\tdLight1 = DirectionalLight(\"DirectionalLight1\")\r\n\t\tdLight1.setColor(Vec4(0.65, 0.6, 0.6, 1))\t\t\r\n\t\tdLight1NP = render.attachNewNode(dLight1)\r\n\t\tdLight1NP.setHpr(100, -40, 0)\r\n\t\trender.setLight(dLight1NP)\r\n\t\r\n\t\tdLight2 = DirectionalLight(\"DirectionalLight2\")\r\n\t\tdLight2.setColor(Vec4(0.35, 0.35, 0.3, 1))\r\n\t\tdLight2NP = render.attachNewNode(dLight2)\r\n\t\tdLight2NP.setHpr(150, -60, 0)\r\n\t\trender.setLight(dLight2NP)", "def setup_lights(self, settings):\n\n for light in settings.lights: # for each light listed in yaml file\n lst = Light(light, settings.lights, settings) # create a Light instance with settings\n self.lights.append(lst) # add it to the list of lights", "def import_scene(file_path):\n\n pass", "def exportLights(self):\n\t\t#TODO! 
REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\t\tif len(finalLi) == 1:\n\t\t\t\tlitsToExport.append( finalLi[0] )\n\t\t\telse:\n\t\t\t\tlitsToExport.append( finalLi[1] )\n\t\tif litsToExport:\n\t\t\tmc.select( litsToExport, r=1, ne=1 )\n\t\t\tmc.file( self.lightPath.path, op=\"v=0\", typ=\"mayaAscii\", pr=1, es=1 )\n\t\t\t#export Light Linking\n\t\t\tself.exportLightLinking()", "def turnLightingSystemOn():\n dislin.light('ON')", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def test_light_interface(light_name='head_green_light'):\n l = Lights()\n rospy.loginfo(\"All available lights on this robot:\\n{0}\\n\".format(\n ', '.join(l.list_all_lights())))\n rospy.loginfo(\"Blinking Light: {0}\".format(light_name))\n on_off = lambda x: 'ON' if l.get_light_state(x) else 'OFF'\n rospy.loginfo(\"Initial state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn off light\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # reset output\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"Final state: {0}\".format(on_off(light_name)))", "def gl_lighting():\n for viewer in nuke.allNodes('Viewer'):\n val = int(viewer.knob('gl_lighting').getValue())\n viewer.knob('gl_lighting').setValue(not val)", "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False", "def testLightImport(self):\n\n archive = IArchive(\"light1.abc\")\n emptyLightObj = ILight(archive.getTop(), \"emptyLight\" )\n lightObj = ILight(archive.getTop(), \"myLight\" )\n\n self.assertFalse(emptyLightObj.getSchema().getArbGeomParams().valid())\n self.assertFalse(emptyLightObj.getSchema().getUserProperties().valid())\n self.assertEqual(lightObj.getSchema().getArbGeomParams().getNumProperties(), 1)\n self.assertEqual(lightObj.getSchema().getUserProperties().getNumProperties(), 1)\n\n samp = lightObj.getSchema().getCameraSchema().getValue( 0 )\n window = samp.getScreenWindow();\n self.assertAlmostEqual( window['top'], 0.666666666666667 )\n self.assertAlmostEqual( window['bottom'], -0.666666666666667 )\n self.assertAlmostEqual( window['left'], -1.0 )\n self.assertAlmostEqual( window['right'], 1.0 )\n\n samp = lightObj.getSchema().getCameraSchema().getValue( 1 )\n window = samp.getScreenWindow();\n self.assertAlmostEqual( window['top'], -0.35 )\n self.assertAlmostEqual( window['bottom'], 0.75 )\n self.assertAlmostEqual( window['left'], 0.1 )\n self.assertAlmostEqual( window['right'], 0.5 )\n\n self.assertFalse(lightObj.getSchema().getCameraSchema().getChildBoundsProperty().valid())", "def enableLighting(self):\r\n\t\t\r\n\t\tglEnable(GL_LIGHTING)", "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render 
import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, \"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. 
Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def run(self) -> None:\n self._hass.turn_on('scene.{0}'.format(self._args['scene']))", "def initialize_lights(self):\n\t\tfor light in OUTPUT.LIGHTS:\n\t\t\tif light != -1:\n\t\t\t\tio.set_bit(light, 0)\n\t\tfor order in self.orderQueue.yield_orders(exclude=(None,)):\n\t\t\tself.set_button_light(order.floor, OUTPUT.IN_LIGHTS, 1)", "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def _create_example_light():\n return Light({\"warning\": False, \"off\": True})", "def lights(self):\n return list(self.GetLights())", "def render(self):\r\n \r\n # --------------------------------\r\n # Set world-level Panda properties\r\n # --------------------------------\r\n\r\n # Create Ambient Light 1\r\n ambientLight = AmbientLight( 'ambientLight_1' )\r\n ambientLight.setColor( Vec4( 0.2, 0.2, 0.2, 1 ) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, 50, 50)\r\n render.setLight(ambientLightNP)\r\n\r\n # Create Ambient Light 2\r\n ambientLight = AmbientLight( 'ambientLight_2' )\r\n ambientLight.setColor( Vec4(0.2, 0.2, 0.2, 1) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, -50, 50)\r\n render.setLight(ambientLightNP)\r\n# \r\n# # Directional light 01\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.8, 0.2, 0.2, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing backwards, towards the camera.\r\n# directionalLightNP.setHpr(180, 20, 0)\r\n# render.setLight(directionalLightNP)\r\n#\r\n# # Directional light 02\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.2, 0.2, 0.8, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing forwards, away from the camera.\r\n# directionalLightNP.setHpr(0, -20, 0)\r\n# render.setLight(directionalLightNP)\r\n\r\n #create a directional light\r\n #light = DirectionalLight('my dlight')\r\n\r\n #create a point light\r\n light = PointLight('plight')\r\n #light.setColor(VBase4(0.2, 0.2, 0.2, 1))\r\n\r\n #The following line doesn't work in Panda3D 
1.7.0\r\n #lightPath = render.attachNewNode(light.upcastToPandaNode())\r\n\r\n lightPath = render.attachNewNode(light)\r\n lightPath.setPos( 10, 10, 10)\r\n\r\n #lightPath.lookAt(objPath)\r\n\r\n #illuminate all\r\n render.setLight(lightPath)\r\n #illuminate only objPath objects\r\n #objPath.setLight(lightPath)\r\n\r\n #self.SetMouseControls(objPath)\r\n #self.setKeyboardControls()\r\n \r\n taskMgr.add(self.mouseControlsTask, 'mouseControlsTask')\r\n #taskMgr.add(self.cameraMovementTask, 'cameraMovementTask') \r\n\r\n base.setBackgroundColor( .0, .0, .0 )\r\n\r\n #taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")\r\n #core.cmd.exeCommand(\"LoadEdge\", obj, file_name+self.WingedEdgeExtensions[0], file_name+self.WingedEdgeExtensions[1], file_name+self.WingedEdgeExtensions[2], file_name+self.WingedEdgeExtensions[3])\r\n #self.model = importer.loadFile(fileName)\r\n #if self.model is None:\r\n # print \"Unsupported file\"\r\n # return\r", "def lights(id, all, connect, info, action, bri):\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n\n if connect:\n # If the app is not registered and the button is not pressed,\n # press the button and call connect()\n # (this only needs to be run a single time)\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n else:\n click.secho(\"Already connected\", fg='green')\n\n return\n\n if info:\n # TODO: Print details of all lights\n click.secho('Light details', fg='green')\n for l in bridge.lights:\n\n click.secho(\n '\\t %d: %s is %s' % (l.light_id, l.name, get_state(l.on)),\n fg='green')\n\n if all:\n # TODO: Add api to Run action on all\n click.secho('TODO ADD: Run action on all', fg='green')\n for l in bridge.lights:\n action_on_light_by_id(bridge, l.light_id, action)\n\n else:\n if not valid_id(id):\n return\n action_on_light_by_id(bridge, int(id), action)", "def set_light(self, light, num=0):\r\n #TODO (pg) need MAXLIGHTS global variable, room for two now but shader\r\n # only uses 1.\r\n if num > 1 or num < 0:\r\n num = 0\r\n stn = 24 + num * 9\r\n self.unif[stn:(stn + 3)] = light.lightpos[0:3]\r\n self.unif[(stn + 3):(stn + 6)] = light.lightcol[0:3]\r\n self.unif[(stn + 6):(stn + 9)] = light.lightamb[0:3]", "def __init__(self, LightFun):\n self.setParameters()\n self.Light = LightFun", "async def Turn_On_Lights() -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": \"green\",\n }", "def testLighExport(self):\n\n archive = OArchive(\"light1.abc\")\n emptyLightObj = OLight(archive.getTop(), \"emptyLight\")\n lightObj = OLight(archive.getTop(), \"myLight\" )\n\n samp = CameraSample()\n lightObj.getSchema().setCameraSample( samp )\n\n samp = CameraSample( -0.35, 0.75, 0.1, 0.5 )\n lightObj.getSchema().getChildBoundsProperty().setValue(\n Box3d( V3d( 0.0, 0.1, 0.2 ), V3d( 0.3, 0.4, 0.5 ) ) )\n\n lightObj.getSchema().setCameraSample( samp )\n\n arg = lightObj.getSchema().getArbGeomParams()\n param = OFloatGeomParam( arg, \"test\", False, kConstantScope, 1 )\n user = lightObj.getSchema().getUserProperties()\n OFloatProperty( user, \"test\" )", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def turn_on_lights(bridge):\n for light in bridge.lights:\n bridge.set_light(light.light_id, {'ct': 350, 'bri': 254, 'on': True})", "def importLightLinking(self, asset 
= '', searchAndReplace = ['',''] ):\n\t\tLayersInfo = pickle.load( open( self.lightLinkPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tif not asset == '':\n\t\t\tLayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )\n\t\tfor l in LayersInfo.keys():\n\t\t\tobjsToBreakLink = []\n\t\t\tfor link in LayersInfo[l]:\n\t\t\t\tif mc.objExists( link ):\n\t\t\t\t\tobjsToBreakLink.append( link )\n\t\t\tmc.lightlink( b = True, light = l, o = objsToBreakLink )\n\t\tmc.refresh( su = 0 )", "def init_gl(self):\n\n # default background color is white-ish\n background = [.99, .99, .99, 1.0]\n # if user passed a background color use it\n if 'background' in self.kwargs:\n try:\n # convert to (4,) uint8 RGBA\n background = to_rgba(self.kwargs['background'])\n # convert to 0.0 - 1.0 float\n background = background.astype(np.float64) / 255.0\n except BaseException:\n log.error('background color wrong!',\n exc_info=True)\n # apply the background color\n gl.glClearColor(*background)\n\n max_depth = (np.abs(self.scene.bounds).max(axis=1) ** 2).sum() ** .5\n max_depth = np.clip(max_depth, 500.00, np.inf)\n gl.glDepthRange(0.0, max_depth)\n\n gl.glClearDepth(1.0)\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glDepthFunc(gl.GL_LEQUAL)\n\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glEnable(gl.GL_LIGHTING)\n gl.glEnable(gl.GL_LIGHT0)\n gl.glEnable(gl.GL_LIGHT1)\n\n # put the light at one corner of the scenes AABB\n gl.glLightfv(gl.GL_LIGHT0,\n gl.GL_POSITION,\n rendering.vector_to_gl(np.append(self.scene.bounds[1], 0)))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_SPECULAR,\n rendering.vector_to_gl(.5, .5, 1, 1))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_DIFFUSE,\n rendering.vector_to_gl(1, 1, 1, .75))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_AMBIENT,\n rendering.vector_to_gl(.1, .1, .1, .2))\n\n gl.glColorMaterial(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT_AND_DIFFUSE)\n gl.glEnable(gl.GL_COLOR_MATERIAL)\n gl.glShadeModel(gl.GL_SMOOTH)\n\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_AMBIENT,\n rendering.vector_to_gl(0.192250, 0.192250, 0.192250))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_DIFFUSE,\n rendering.vector_to_gl(0.507540, 0.507540, 0.507540))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_SPECULAR,\n rendering.vector_to_gl(.5082730, .5082730, .5082730))\n\n gl.glMaterialf(gl.GL_FRONT,\n gl.GL_SHININESS,\n .4 * 128.0)\n\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)\n\n gl.glLineWidth(1.5)\n gl.glPointSize(4)", "async def Turn_On_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": \"green\",\n }", "def draw_scene():\n # Place the camera\n camera.placeCamera()\n \n \n # Set up the global ambient light. (Try commenting out.)\n amb = [ 0*brightness, 0*brightness, 0*brightness, 1.0 ]\n glLightModelfv(GL_LIGHT_MODEL_AMBIENT, amb)\n\n # Set up the main light (LIGHT0)... 
or not.\n if is_light_on:\n place_blue_light()\n place_red_light()\n place_green_light()\n place_lamp_light()\n else:\n glDisable(GL_LIGHT0)\n glDisable(GL_LIGHT1)\n glDisable(GL_LIGHT2)\n glDisable(GL_LIGHT3)\n\n if lamp_light:\n place_lamp_light()\n else:\n glDisable(GL_LIGHT3)\n\n if headlamp_is_on:\n place_headlamp_light()\n else:\n glDisable(GL_LIGHT4)\n\n # Now spin the world around the y-axis (for effect).\n glRotated(angle_movement, 0, 1, 0)\n draw_objects()", "def appInit(self):\n glutInitDisplayMode( GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH )\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0 )\n glEnable( GL_DEPTH_TEST )\n glShadeModel( GL_SMOOTH )\n glEnable( GL_NORMALIZE )\n glEnable( GL_COLOR_MATERIAL )\n\n glEnable( GL_LIGHTING )\n glEnable( GL_LIGHT0 )\n\n self.set_lighting()\n\n self.make_simple_scenes()\n self.make_multi_object_scene()", "def build_light(self, item):\n\n # Validete NMS object.\n if \"ObjectID\" not in item:\n return\n\n # Get object id from item.\n object_id = item[\"ObjectID\"]\n # Find light data\n if object_id not in self.lights_dictionary:\n return\n\n # Build Lights\n light_information = self.lights_dictionary[object_id]\n for idx, light_values in enumerate(light_information.values()):\n # Get Light Properties.\n light_type = light_values[\"type\"]\n light_location = light_values[\"location\"]\n\n # Create light.\n light = bpy.ops.object.light_add(\n type=light_type.upper(),\n location=light_location\n )\n light = bpy.context.object\n light[\"NMS_LIGHT\"] = True\n light.name = \"{0}_light{1}\".format(item.name, idx)\n data_copy = deepcopy(light_values)\n\n # Remove invalid blender properties.\n data_copy.pop(\"type\")\n data_copy.pop(\"location\")\n\n # Apply all other properties to blender object.\n for key, value in data_copy.items():\n if isinstance(value, list):\n value = mathutils.Vector(tuple(value))\n setattr(light.data, key, value)\n\n # Parent to object.\n utils.parent(light, item)\n\n # Disable Selection.\n light.hide_viewport = True\n light.hide_select = True", "async def light_node_fixture(\n hass: HomeAssistant, matter_client: MagicMock\n) -> MatterNode:\n return await setup_integration_with_node_fixture(\n hass, \"extended-color-light\", matter_client\n )", "def main():\r\n LEDStrip = createNeoPixelObject()\r\n setup(LEDStrip)\r\n clock(LEDStrip)", "def define_materials():\n global robot\n robot.add_material(ur.Material('Black', ur.Color(0.1, 0.1, 0.1, 1)))\n robot.add_material(ur.Material('LightGrey', ur.Color(0.9, 0.9, 0.9, 1)))\n robot.add_material(ur.Material('Grey', ur.Color(0.6, 0.6, 0.6, 1)))\n robot.add_material(ur.Material('DarkGrey', ur.Color(0.3, 0.3, 0.3, 1)))", "def set_lighting(self):\n lightPosition = [-1, 1, 1, 0]\n glLightfv(GL_LIGHT0, GL_POSITION, lightPosition)\n\n ambientLight = [1, 1, 0.4, 0.5]\n\n if self.lighting:\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)\n else:\n glDisable(GL_LIGHTING)\n glDisable(GL_LIGHT0)", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def __init__(self, light, lights, settings):\n\n if 'name' in lights[light]:\n self.name = lights[light]['name']\n else:\n self.name = light\n if 'gpio' in lights[light]:\n self.gpio = lights[light]['gpio']\n else:\n self.gpio = 18 # GPIO pin 18 is the default for testing\n if 'on' in lights[light]:\n self.on = lights[light]['on']\n else:\n self.on = 'continuous'\n\n GPIO.setup(self.gpio, GPIO.OUT)\n if self.on == 'continuous':\n 
self.turn_on()\n else: # set up light on/off cyclying other than continuous\n pass # for example, during certain hours", "def addLight(self, id):\r\n\t\t\r\n\t\tnewLight = Light(id)\r\n\t\tself.lights[id] = newLight", "def set_lighting(self):\n lightPosition = [-1, 1, 1, 0]\n glLightfv(GL_LIGHT0, GL_POSITION, lightPosition)\n\n ambientLight = [0.1, 0.1, 0.1, 1]\n\n if self.lighting:\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, self.diffuse_light)\n else:\n glDisable(GL_LIGHTING)\n glDisable(GL_LIGHT0)", "def setup( self ):\n glClearColor(*self.background)\n glClearDepth(1.0)\n glDepthFunc(GL_LEQUAL)\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n '''\n ambientLight = [0.2, 0.2, 0.2, 1.0]\n diffuseLight = [0.8, 0.8, 0.8, 1.0]\n specularLight = [0.5, 0.5, 0.5, 1.0]\n lightPos = [0.0, 0.0, -30.0, 1.0]\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight)\n glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight)\n glLightfv(GL_LIGHT0, GL_POSITION, lightPos)\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n \n mat = [1.0, 0.0, 0.1, 1.0]\n glMaterialfv(GL_FRONT, GL_AMBIENT, mat)\n mat[0] = 1.0; mat[1] = 0.0; mat[2] = 0.0\n glMaterialfv(GL_FRONT, GL_DIFFUSE, mat)\n mat[0] = 1.0; mat[1] = 1.0; mat[2] = 1.0\n glMaterialfv(GL_FRONT, GL_SPECULAR, mat)\n glMaterialf(GL_FRONT, GL_SHININESS, 0.6*128.0)\n glEnable(GL_FOG)\n fogColor = [1.0, 0.0, 1.0, 1.0]\n \n global fogMode\n fogMode = GL_EXP2\n glFogi (GL_FOG_MODE, fogMode)\n glFogfv (GL_FOG_COLOR, fogColor)\n glFogf (GL_FOG_DENSITY, 0.0001)\n glHint (GL_FOG_HINT, GL_NICEST)\n glFogf (GL_FOG_START, 10.0)\n glFogf (GL_FOG_END, -1000)\n glClearColor(0.0, 0.0, 0.1, 1.0)\n '''\n glEnable(GL_DEPTH_TEST) # Enables Depth Testing\n glShadeModel(GL_SMOOTH) # Enables smooth color shading\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity() \n # Set up perspective view\n gluPerspective(50.0, float(self.size[0])/float(self.size[1]), 0.1, 5000.0)\n # Set up an orthographic view\n #glOrtho(-float(width)/2, float(width)/2, -float(height)/2, float(height)/2, -1.0, 1.0)\n glMatrixMode(GL_MODELVIEW)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n display.flip() # For interactiveness sake\n return", "def main():\r\n # create the EdenLudo sample\r\n EdenEvolves = EdenLudo()\r\n # ru-n the scene\r\n run()", "async def Turn_On_Lights_With_Color(\n color: str = Path(..., title=\"Color name or hexadecimal string\")\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS, color)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": color,\n }", "def init(filename):\n global trackball, flashlight, vertex_buffer, normal_buffer, color_buffer, colors, vertices, normals\n\n # initialize quaternions for the light and trackball\n flashlight = quat.for_rotation(0.0,vector(1.0,0.0,0.0))\n trackball = quat.for_rotation(0.0,vector(1.0,0.0,0.0))\n\n # read the .OBJ file into VBOs\n scene.read(filename)\n vertices,normals,colors = scene.compile()\n \n vertex_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, vertex_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(vertices)*4, \n (c_float*len(vertices))(*vertices), GL_STATIC_DRAW)\n\n normal_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, normal_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(normals)*4, \n (c_float*len(normals))(*normals), GL_STATIC_DRAW)\n\n color_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, color_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(colors)*4, 
\n (c_float*len(colors))(*colors), GL_STATIC_DRAW)\n\n\n # set up the object shaders\n init_shaders()\n\n glEnable (GL_DEPTH_TEST)", "def activate():\n num = int(var.get())\n return light_controller.activate_scene(light_controller.show_scene_id(scenes[num]))", "def nextLight():\n global light\n pin.setAllOutPinsLow()\n light += 1\n light %= len(traffic_lights)\n print traffic_colors[light]\n pin.setOutPinHigh(traffic_lights[light])", "def set_light_on(self):\r\n self._light = \"ON\"", "def place_headlamp_light():\n\n lx = 1.0\n ly = light_height\n lz = 2.0\n #light_position = [lx, ly, lz, 1.0]\n light_position = [0.0, 0.0, 0.0, 1]\n light_ambient = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_diffuse = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_specular = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_direction = [1.0, -1.0, 1.0, 0.0] # Light points down\n # glViewport(0, 0, win_width, win_height)\n # glMatrixMode(GL_PROJECTION)\n # glLoadIdentity()\n # gluPerspective(40.0, float(win_width) / float(win_height), 0.01, 100.0)\n #\n # glMatrixMode(GL_MODELVIEW)\n # glLoadIdentity()\n # glPushMatrix()\n glLightfv(GL_LIGHT4, GL_POSITION, light_position)\n\n\n\n #glLightfv(GL_LIGHT4, GL_POSITION, (GLfloat * 4)(0.0, 0.0, 0.0, 1))\n glLightfv(GL_LIGHT4, GL_AMBIENT, light_ambient)\n glLightfv(GL_LIGHT4, GL_DIFFUSE, light_diffuse)\n glLightfv(GL_LIGHT4, GL_SPECULAR, light_specular)\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! Otherwise disabled\n # glLightf(GL_LIGHT1, GL_CONSTANT_ATTENUATION, 2.0)\n # glLightf(GL_LIGHT1, GL_LINEAR_ATTENUATION, 0.0)\n # glLightf(GL_LIGHT1, GL_QUADRATIC_ATTENUATION, 0.0)\n\n glLightf(GL_LIGHT4, GL_CONSTANT_ATTENUATION, 3.0)\n glLightf(GL_LIGHT4, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT4, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if headlamp_is_on:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 30.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT4, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 180.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n\n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n\n glEnable(GL_LIGHT4)\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx, ly, lz)\n glDisable(GL_LIGHTING)\n glColor3f(brightness, brightness, brightness)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def set_button_light(self, floor, lights, value):\n\t\tif lights[floor] != -1:\n\t\t\tio.set_bit(lights[floor], value)", "def _addLightMenuItems(ned, node):\n pass", "def __init__(self,\r\n lightpos=(10, -10, 20),\r\n lightcol=(1.0, 1.0, 1.0),\r\n lightamb=(0.1, 0.1, 0.2)):\r\n super(Light, self).__init__()\r\n self.lightpos = lightpos\r\n self.lightcol = lightcol\r\n self.lightamb = lightamb", "def __init__(self, scene = base.render, ambient = 0.2, hardness = 16, fov = 40, near = 10, far = 100):\n \n # Read and store the function parameters\n self.scene = scene\n self.__ambient = ambient\n self.__hardness = hardness\n \n # By default, mark every object as textured.\n self.flagTexturedObject(self.scene)\n \n # Create the buffer plus a texture to store the output in\n buffer = createOffscreenBuffer(-3)\n depthmap = Texture()\n buffer.addRenderTexture(depthmap, GraphicsOutput.RTMBindOrCopy, 
GraphicsOutput.RTPColor)\n \n # Set the shadow filter if it is supported\n if(base.win.getGsg().getSupportsShadowFilter()):\n depthmap.setMinfilter(Texture.FTShadow)\n depthmap.setMagfilter(Texture.FTShadow) \n \n # Make the camera\n self.light = base.makeCamera(buffer)\n self.light.node().setScene(self.scene)\n self.light.node().getLens().setFov(fov)\n self.light.node().getLens().setNearFar(near, far)\n\n # Put a shader on the Light camera.\n lci = NodePath(PandaNode(\"lightCameraInitializer\"))\n lci.setShader(loader.loadShader(\"caster.sha\"))\n self.light.node().setInitialState(lci.getState())\n\n # Put a shader on the Main camera.\n mci = NodePath(PandaNode(\"mainCameraInitializer\"))\n mci.setShader(loader.loadShader(\"softshadow.sha\"))\n base.cam.node().setInitialState(mci.getState())\n\n # Set up the blurring buffers, one that blurs horizontally, the other vertically\n #blurXBuffer = makeFilterBuffer(buffer, \"Blur X\", -2, loader.loadShader(\"blurx.sha\"))\n #blurYBuffer = makeFilterBuffer(blurXBuffer, \"Blur Y\", -1, loader.loadShader(\"blury.sha\"))\n\n # Set the shader inputs\n self.scene.setShaderInput(\"light\", self.light)\n #self.scene.setShaderInput(\"depthmap\", blurYBuffer.getTexture())\n self.scene.setShaderInput(\"depthmap\", buffer.getTexture())\n self.scene.setShaderInput(\"props\", ambient, hardness, 0, 1)", "def start_light_chaser(self, delay: float = 0.1) -> None:\n if self._light_chaser:\n raise ValueError('Light chaser already running.')\n\n async def _chaser():\n while True:\n for i in range(4):\n colors = [off_light] * 4\n colors[i] = self._song.get_cube_light(self.cube_id)\n self._cube.set_light_corners(*colors)\n await asyncio.sleep(delay, loop=self._cube._loop)\n\n self._light_chaser = asyncio.ensure_future(_chaser(), loop=self._cube._loop)", "async def light(self) -> None:\n self.lit = True\n await self.run_command(\"miner fault_light on\")\n print(\"light \" + self.ip)", "def lighton(update: Update, context: CallbackContext) -> None:\n if __sauna.control.getPortValue(\"Light Sensor\") == 0:\n # TODO Mit Stromstossrelais ist dieser Code richtig\n # __sauna.control.togglePortValue(\"Light Switch\")\n update.message.reply_text(\"Light is on\")\n else:\n update.message.reply_text(\"Light was already on\")\n\n __sauna.control.setPortValue(\"Light Switch\")\n val = __sauna.control.getPortValue(\"Light Switch\")\n update.message.reply_text(\"Light Switch := \" + str(val))", "def check_light(light: pykulersky.Light):\n light.connect()\n light.get_color()", "def main():\n\n # connect to the hue bridge\n bridge = phue.Bridge()\n bridge.connect() # throw an exception if connection was not established\n\n tracker = beat_tracker.BeatTracker()\n tracker.start()\n try:\n\n # obtain a list of lights to control\n lights = get_lights(bridge)\n\n x = 0\n ids = [l.light_id for l in lights]\n\n while True:\n\n time_between_beats = (60.0 / tracker.tempo)\n\n combos = [\n [1, 0],\n [1, 254],\n [1, 0],\n [500, 254],\n ]\n x = (x + 1) % 4\n\n temp, _brightness = combos[x]\n\n adjust = int(_brightness * (int(tracker.volume / 1500.0) * 2))\n\n if tracker.volume < 1000:\n adjust = 0\n\n brightness = int(min(adjust, 254))\n on = bool(tracker.volume > 800)\n command = {\"ct\": temp, \"bri\": brightness, \"transitiontime\": 1, \"on\": on}\n bridge.set_light(ids, command)\n\n if time_between_beats > 1:\n time.sleep(1)\n else:\n time.sleep(time_between_beats)\n\n finally:\n tracker.stop()", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", 
\"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n 
meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 
1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def __init__(self, game, world_file):\n self.game = game\n self.world_file = world_file\n self.floor_batch = game.floor_batch\n self.wall_batch = game.wall_batch\n self.lightmap = LightMap()\n self.tiles = {}\n self.load_world()\n self.load_tileset()\n self.player_light = self.lightmap.add_light(0,0,15)", "def set_light_on(self):\n self._light = \"ON\"", "def place_red_light():\n glMatrixMode(GL_MODELVIEW)\n lx = 4.0\n ly = light_height\n lz = 2.0\n light_position = [lx, ly, lz, 1.0]\n lightr_ambient = [1.0, 0, 0, 1] # red\n lightb_diffuse = [0.4, 0.4, 0.6, 1] # blue\n lightb_specular = [0.0, 0, 0.8, 1] # blue\n light_direction = [1.0, -1.0, 1.0, 0.0] # Light points down\n\n\n # For Light 1 (red), set position, ambient, diffuse, and specular values\n glLightfv(GL_LIGHT1, GL_POSITION, light_position)\n glLightfv(GL_LIGHT1, GL_AMBIENT, lightr_ambient)\n glLightfv(GL_LIGHT1, GL_DIFFUSE, lightb_diffuse)\n glLightfv(GL_LIGHT1, GL_SPECULAR, lightb_specular)\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! 
Otherwise disabled\n glLightf(GL_LIGHT1, GL_CONSTANT_ATTENUATION, 2.0)\n glLightf(GL_LIGHT1, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT1, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if red_light:\n glLightf(GL_LIGHT1, GL_SPOT_CUTOFF, 45.0)\n glLightf(GL_LIGHT1, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT1, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT1, GL_SPOT_CUTOFF, 180.0)\n glLightf(GL_LIGHT1, GL_SPOT_EXPONENT, 0.0)\n\n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n\n glEnable(GL_LIGHT1)\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx, ly, lz)\n glDisable(GL_LIGHTING)\n glColor3f(brightness, 0, 0)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def init_custom_lights(fig_axis, prescale=None):\n epsilon = 0.1 # a factor which forces the positions of the light faces to be close to the intersection\n\n lights_data = []\n\n node_id = 53119168\n\n try:\n out_vectors = np.array(nav.determine_pedigree(node_id))\n except NetworkXNoPath or ValueError:\n raise('Could not determine pedigree for light at node {}'.format(node_id))\n\n degree = len(out_vectors)\n x, y = nav.get_position_of_node(node_id)\n go = [False, True] * degree * 2\n go = go[:degree]\n\n light = {'object': 'light',\n 'node': node_id,\n 'degree': degree,\n 'x': x,\n 'y': y,\n 'switch-counter': 0,\n 'switch-time': models.determine_traffic_light_timer()\n }\n\n light['out-xpositions'] = [x + epsilon * out_vectors[j][0] for j in range(light['degree'])]\n light['out-ypositions'] = [y + epsilon * out_vectors[j][1] for j in range(light['degree'])]\n light['out-xvectors'] = [out_vectors[j][0] for j in range(light['degree'])]\n light['out-yvectors'] = [out_vectors[j][1] for j in range(light['degree'])]\n light['go-values'] = np.array([go[j] for j in range(light['degree'])])\n\n lights_data.append(light)\n\n lights = pd.DataFrame(lights_data)\n\n # determine binning and assign bins to lights\n lights['xbin'], lights['ybin'] = models.determine_bins(fig_axis, lights)\n\n # print('Number of traffic lights: {}'.format(len(lights)))\n return lights", "def flicker_lights(self):\n if self.lighting:\n self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def flicker_lights(self):\n if self.lighting:\n self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def render_sample(latents, material_names, include_lights, output_filename, save_scene):\n\n # set output path\n bpy.context.scene.render.filepath = output_filename\n\n # set objects and lights\n update_objects_and_lights(latents, material_names, include_lights)\n\n rgba_background = colorsys.hsv_to_rgb(latents[9] / (2.0 * np.pi), 0.60, 1.0) + (\n 1.0,\n )\n render_utils.change_material(\n bpy.data.objects[\"Ground\"].data.materials[-1], Color=rgba_background\n )\n\n # set scene background\n bpy.ops.render.render(write_still=True)\n\n if save_scene:\n # just for debugging\n bpy.ops.wm.save_as_mainfile(\n filepath=f\"scene_{os.path.basename(output_filename)}.blend\"\n )", "async def Turn_On_Light_With_Color(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n color: str = Path(..., title=\"Color name or hexadecimal string\"),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id, color)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": color,\n 
}", "def get_light():\n return 'do some magic!'", "def getLight(self):\n return self.light", "async def Rainbow_Lights():\n busylightapi.manager.apply_effect_to_light(ALL_LIGHTS, rainbow)\n return {\n \"action\": \"effect\",\n \"name\": \"rainbow\",\n \"light_id\": \"all\",\n }", "def lightsOn(strip, interval):\r\n clearStrip(strip)\r\n print(\"lightsOn\", strip, interval)\r\n fade(LED_COLOR_OFF, LED_COLOR_FULL, STEPS, interval, strip)", "def __init__(self):\n GPIO.setmode(GPIO.BOARD)\n for light in self.all:\n GPIO.setup(light, GPIO.OUT)", "def ControlLights(state):\n for led in (RED,YELLOW,GREEN):\n GPIO.output(LED[led],state[led])\n time.sleep(FLASH_TIME)", "async def add_lights(self, context):\n if str(context.author.id) in BANNED_USERS:\n await context.send(f'I can\\'t do that, {context.author.mention}')\n return print(f'{TIME}: {context.author} failed to add image Banned.')\n\n await add_image(context, 'lights')", "def toggle_lights(bridge):\n if check_any_light_on(bridge):\n turn_off_lights(bridge)\n else:\n turn_on_lights(bridge)", "def set_up_scenes():\n cmd.zoom('Cathepsin', 10) # Zoom out to get a view on the whole complex\n cmd.scene('001', 'store', message='This is the first scene with a view on the complex!')\n cmd.set_view(closeup) # Get a close-up of the ligand by using the manually chosen viewpoint\n cmd.scene('002', 'store', message='This is the second scene with a close-up on the ligand!')", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def __init__(self):\r\n ScriptedLoadableModuleLogic.__init__(self)\r\n self.rgbport = 18944\r\n self.depthPort = 18945", "def start(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].start()\n self.globalTimer = Timer(1, self.step)\n self.globalTimer.start()", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # 
print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def make_simple_scenes(self):\n clown = Clown()\n clown.set_location( 0, 0, 0 )\n clown.set_size( 1, 1, 1 )\n self.objects.append( clown )\n\n clown1Scene = Scene3D()\n clown1Scene.add_object( clown )\n self.scenes.append( clown1Scene )\n\n head = Head()\n head.set_location( 0, 0, 0 )\n head.set_size( 1.5, 1.5, 1.5 )\n self.objects.append( head )\n\n headScene = Scene3D()\n headScene.add_object( head )\n self.scenes.append( headScene )\n\n hat = Hat()\n hat.set_location( 0, 0, 0 )\n hat.set_size( 1.5, 1.5, 1.5 )\n self.objects.append( hat )\n\n hat1Scene = Scene3D()\n hat1Scene.add_object( hat )\n self.scenes.append( hat1Scene )\n\n eye = Eye()\n eye.set_color(1, 0, 0)\n eye.set_location(0, 1, 1)\n eye.set_size(1.3, 1.3, 1.3)\n eye.set_rotate( 45, 1, 0, 0 )\n self.objects.append( eye )\n\n eye1Scene = Scene3D()\n eye1Scene.add_object( eye )\n self.scenes.append( eye1Scene )\n\n donut = Donut()\n donut.set_color(1, 0, 1 )\n donut.set_location( 0, 0, 0 )\n donut.set_size( 2.0, 2.0, 2.0 )\n donut.set_rotate( 45, 0, 1, 0)\n self.objects.append( donut )\n\n donut1Scene = Scene3D()\n donut1Scene.add_object( donut )\n self.scenes.append( donut1Scene )\n\n cone = Cone()\n cone.set_color( 1, 0, 1 )\n cone.set_location( 0, 0, 0 )\n cone.set_size( 2.0, 2.0, 2.0 )\n self.objects.append( cone )\n\n cone1Scene = Scene3D()\n cone1Scene.add_object( cone )\n self.scenes.append( cone1Scene )\n\n box1 = self.make_box(1, Color(1, 0, 1))\n self.objects.append( box1 )\n\n box1Scene = Scene3D()\n box1Scene.add_object( box1 )\n self.scenes.append( box1Scene )\n\n box2 = self.make_box( 1, Color(0, 1, 1 ))\n box2.set_rotate( 45, 0, 0, 1 )\n box2.set_size(2.0, 2.0, 2.0)\n self.objects.append( box2 )\n\n box2Scene = Scene3D()\n box2Scene.add_object( box2 )\n self.scenes.append( box2Scene )\n\n sp = self.make_ball(1, Color(0.8, 0.8, 0))\n sp.set_size(2.0, 2.0, 2.0)\n self.objects.append( sp )\n\n ballScene = Scene3D()\n ballScene.add_object( sp )\n self.scenes.append( ballScene )", "def _set_backpack_led(self, msg):\n # setup color as integer values\n color = [int(x * 255) for x in [msg.r, msg.g, msg.b, msg.a]]\n # create lights object with duration\n light = cozmo.lights.Light(cozmo.lights.Color(rgba=color), on_period_ms=1000)\n # set lights\n self._cozmo.set_all_backpack_lights(light)", "def lights(self) -> List[dict]:\n return self.items_by_domain(\"light\")", "def setLightSwitch(self, _state=False):\n if _state == True:\n render.setLight(self.lightNP)\n elif _state == False:\n render.clearLight(self.lightNP)", "def set_lighting_position(self):\n glLightfv(GL_LIGHT0, GL_POSITION, self.light_position)\n # draw a sphere right on where light is, helps debug :)\n # glPushMatrix()\n # glColor3f(1, 1, 1)\n # glTranslatef(self.light_position[0], self.light_position[1], self.light_position[2])\n # q = gluNewQuadric()\n # gluQuadricDrawStyle(q, GLU_FILL)\n # gluQuadricNormals(q, GLU_SMOOTH)\n # gluSphere(q, .7, 50, 50)\n # 
glPopMatrix()", "def place_blue_light():\n glMatrixMode(GL_MODELVIEW)\n lx = 3.0\n ly = light_height\n lz = 1.0\n light_position = [ lx, ly, lz, 1.0 ]\n\n lightb_ambient = [0.0, 0, 1, 1] #blue\n lightb_diffuse = [0.4, 0.4, 0.6, 1] #blue\n lightb_specular = [0.0, 0, 0.8, 1] #blue\n light_direction = [ 1.0, -1.0, 1.0, 0.0 ] # Light points down\n\n\n # For Light 0 (blue), set position, ambient, diffuse, and specular values\n glLightfv(GL_LIGHT0, GL_POSITION, light_position)\n glLightfv(GL_LIGHT0, GL_AMBIENT, lightb_ambient)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, lightb_diffuse)\n glLightfv(GL_LIGHT0, GL_SPECULAR, lightb_specular)\n\n\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! Otherwise disabled\n glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 1.0)\n glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT0, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if blue_light:\n glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 45.0)\n glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT0, GL_SPOT_CUTOFF,180.0)\n glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0.0)\n \n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n \n glEnable(GL_LIGHT0)\n\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx,ly,lz)\n glDisable(GL_LIGHTING)\n glColor3f(0, 0, brightness)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def lightPath(self):\n\t\treturn mfl.mayaFile( self._path + '/lights.ma' )", "def import_all(self) -> None:\n with open(normpath('levels/level/lvl1.txt'), 'r') as f:\n while f:\n string = f.readline()\n if string == '':\n break\n string = string.strip().split(' ')\n if len(string) == 4:\n self.objects.append(pygame.Rect(int(string[0]), int(\n string[1]), int(string[2]), int(string[3])))\n for i in range(len(self.objects)):\n self.color.append(colors[random.randint(0, len(colors)-1)])", "def importShaders(self):\n\t\tif self.shaderPath.exists:\n\t\t\tself.shaderPath.imp()", "def lightlink(*args, b: bool=True, hierarchy: bool=True, light: Union[name, List[name]]=None,\n make: bool=True, object: Union[name, List[name]]=None, sets: bool=True, shadow:\n bool=True, shapes: bool=True, transforms: bool=True, useActiveLights: bool=True,\n useActiveObjects: bool=True, q=True, query=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def check_lights(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n progress_controller.maximum = 2\n\n all_lights = pm.ls(\n type=[\n \"light\",\n \"aiAreaLight\",\n \"aiSkyDomeLight\",\n \"aiPhotometricLight\",\n \"RedshiftPhysicalSun\",\n \"RedshiftPhysicalLight\",\n \"RedshiftIESLight\",\n \"RedshiftPortalLight\",\n \"RedshiftDomeLight\",\n ]\n )\n progress_controller.increment()\n\n if len(all_lights):\n pm.select(all_lights)\n progress_controller.increment()\n progress_controller.complete()\n raise PublishError(\n \"There are <b>Lights</b> in the current scene:<br><br>%s<br><br>\"\n \"Please delete them!!!\" % \"<br>\".join(map(lambda x: x.name(), all_lights))\n )\n progress_controller.complete()", "def importBaseScene(self):\n logger.debug(\"Func: importBaseScene\")\n relSceneFile = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"RelativePath\"]\n absSceneFile = 
os.path.join(self.projectDir, relSceneFile)\n if os.path.isfile(absSceneFile):\n # cmds.file(absSceneFile, i=True)\n nuke.nodePaste(absSceneFile)\n return 0\n else:\n msg = \"File in Scene Manager database doesnt exist\"\n self._exception(210, msg)\n return -1, msg", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! 
Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def main():\n # Parse arguments for configuration and light type\n parser = argparse.ArgumentParser()\n parser.add_argument(\"light_type\", help=\"lifx or hue\", choices=['lifx', 'hue'], type = str.lower)\n parser.add_argument(\"-c\", \"--config_mode\", action='store_true', help=\"runs the client in config mode which prints out the light data\")\n \n args = parser.parse_args()\n \n config_mode = args.config_mode\n light_type = args.light_type\n \n # Get light information \n # *Note*\n # Only LIFX is supported at this point in time\n light_service = None\n if light_type == 'lifx':\n light_service = lightservice.LIFXLightService(\"https://api.lifx.com/v1/\")\n \n data = light_service.refresh_light_data(config_mode)\n \n button_handler = None\n if config_mode:\n button_handler = buttonhandler.ConfigButtonHandler()\n button_handler.start()\n else:\n button_handler = buttonhandler.ButtonHandler(data)\n button_handler.start(light_service)", "def reference_scene(file_path, **kwargs):\n\n pass", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def exportLightLinking(self):\n\t\tlights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a]\n\t\tallShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))]\n\t\tlitLinks = {}\n\t\tfor l in lights:\n\t\t\tlightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0)\n\t\t\tlitLinks[l]\t = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT\n\t\tpickle.dump( litLinks, open( self.lightLinkPath.path, \"wb\" ) )", "def make_main(self):\n\t\tself.scene.camera = self.main_camera" ]
[ "0.7125975", "0.6562322", "0.65274394", "0.6518946", "0.6397606", "0.63027537", "0.62996095", "0.6106556", "0.6032271", "0.6012362", "0.59544396", "0.5917386", "0.59018606", "0.58937645", "0.58840954", "0.58633345", "0.5855364", "0.577707", "0.57750875", "0.5726249", "0.56992894", "0.56819457", "0.5663281", "0.56577843", "0.56570166", "0.56344575", "0.5629733", "0.5618883", "0.56015676", "0.55990416", "0.55908525", "0.5583412", "0.55818844", "0.5576262", "0.55495745", "0.5538443", "0.55360126", "0.55139345", "0.5510683", "0.5507483", "0.5485059", "0.54676974", "0.54664123", "0.54632324", "0.54536456", "0.5453014", "0.5439837", "0.54333967", "0.542512", "0.5421511", "0.54207546", "0.5409661", "0.54047656", "0.539986", "0.5392716", "0.5384279", "0.53824854", "0.5366448", "0.5364316", "0.5362985", "0.5358127", "0.5352105", "0.5349892", "0.53051025", "0.52966595", "0.52966595", "0.5294716", "0.5271695", "0.52591026", "0.52584904", "0.52509737", "0.52491754", "0.5245942", "0.52443665", "0.5241378", "0.5226235", "0.52227664", "0.5220851", "0.5220527", "0.5216814", "0.521041", "0.52091193", "0.52082336", "0.5202073", "0.5196224", "0.51930845", "0.51872456", "0.51839685", "0.51794827", "0.51741916", "0.517332", "0.5169736", "0.51683265", "0.51651555", "0.51547056", "0.51528984", "0.5152475", "0.5146301", "0.5142998", "0.51375026" ]
0.65937054
1
import light linking to lights
def importLightLinking(self, asset = '', searchAndReplace = ['',''] ): LayersInfo = pickle.load( open( self.lightLinkPath.path, "rb") ) mc.refresh( su = 1 ) if not asset == '': LayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace ) for l in LayersInfo.keys(): objsToBreakLink = [] for link in LayersInfo[l]: if mc.objExists( link ): objsToBreakLink.append( link ) mc.lightlink( b = True, light = l, o = objsToBreakLink ) mc.refresh( su = 0 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importLights(self, asset = '', searchAndReplace = ['',''] ):\n\t\tif self.lightPath.exists:\n\t\t\tself.lightPath.imp()\n\t\t\tif self.lightLinkPath.exists:\n\t\t\t\tself.importLightLinking( asset, searchAndReplace )", "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)", "def exportLightLinking(self):\n\t\tlights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a]\n\t\tallShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))]\n\t\tlitLinks = {}\n\t\tfor l in lights:\n\t\t\tlightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0)\n\t\t\tlitLinks[l]\t = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT\n\t\tpickle.dump( litLinks, open( self.lightLinkPath.path, \"wb\" ) )", "def exportLights(self):\n\t\t#TODO! REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\t\tif len(finalLi) == 1:\n\t\t\t\tlitsToExport.append( finalLi[0] )\n\t\t\telse:\n\t\t\t\tlitsToExport.append( finalLi[1] )\n\t\tif litsToExport:\n\t\t\tmc.select( litsToExport, r=1, ne=1 )\n\t\t\tmc.file( self.lightPath.path, op=\"v=0\", typ=\"mayaAscii\", pr=1, es=1 )\n\t\t\t#export Light Linking\n\t\t\tself.exportLightLinking()", "async def Turn_On_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": \"green\",\n }", "def setup_lights(self, settings):\n\n for light in settings.lights: # for each light listed in yaml file\n lst = Light(light, settings.lights, settings) # create a Light instance with settings\n self.lights.append(lst) # add it to the list of lights", "def turn_on_lights(bridge):\n for light in bridge.lights:\n bridge.set_light(light.light_id, {'ct': 350, 'bri': 254, 'on': True})", "def test_light_interface(light_name='head_green_light'):\n l = Lights()\n rospy.loginfo(\"All available lights on this robot:\\n{0}\\n\".format(\n ', '.join(l.list_all_lights())))\n rospy.loginfo(\"Blinking Light: {0}\".format(light_name))\n on_off = lambda x: 'ON' if l.get_light_state(x) else 'OFF'\n rospy.loginfo(\"Initial state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn off light\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: 
{0}\".format(on_off(light_name)))\n # reset output\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"Final state: {0}\".format(on_off(light_name)))", "def InitLightBasic(self):\r\n\t\t\r\n\t\taLight = AmbientLight(\"AmbientLight\")\r\n\t\taLight.setColor(Vec4(0.3, 0.3, 0.3, 1))\r\n\t\trender.setLight(render.attachNewNode(aLight))\r\n\t\r\n\t\tdLight1 = DirectionalLight(\"DirectionalLight1\")\r\n\t\tdLight1.setColor(Vec4(0.65, 0.6, 0.6, 1))\t\t\r\n\t\tdLight1NP = render.attachNewNode(dLight1)\r\n\t\tdLight1NP.setHpr(100, -40, 0)\r\n\t\trender.setLight(dLight1NP)\r\n\t\r\n\t\tdLight2 = DirectionalLight(\"DirectionalLight2\")\r\n\t\tdLight2.setColor(Vec4(0.35, 0.35, 0.3, 1))\r\n\t\tdLight2NP = render.attachNewNode(dLight2)\r\n\t\tdLight2NP.setHpr(150, -60, 0)\r\n\t\trender.setLight(dLight2NP)", "def lights(id, all, connect, info, action, bri):\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n\n if connect:\n # If the app is not registered and the button is not pressed,\n # press the button and call connect()\n # (this only needs to be run a single time)\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n else:\n click.secho(\"Already connected\", fg='green')\n\n return\n\n if info:\n # TODO: Print details of all lights\n click.secho('Light details', fg='green')\n for l in bridge.lights:\n\n click.secho(\n '\\t %d: %s is %s' % (l.light_id, l.name, get_state(l.on)),\n fg='green')\n\n if all:\n # TODO: Add api to Run action on all\n click.secho('TODO ADD: Run action on all', fg='green')\n for l in bridge.lights:\n action_on_light_by_id(bridge, l.light_id, action)\n\n else:\n if not valid_id(id):\n return\n action_on_light_by_id(bridge, int(id), action)", "def flicker_lights(self):\n print 'Lights Set'", "def addLight(self, id):\r\n\t\t\r\n\t\tnewLight = Light(id)\r\n\t\tself.lights[id] = newLight", "async def Turn_On_Light_With_Color(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n color: str = Path(..., title=\"Color name or hexadecimal string\"),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id, color)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": color,\n }", "def turnLightingSystemOn():\n dislin.light('ON')", "def lightlink(*args, b: bool=True, hierarchy: bool=True, light: Union[name, List[name]]=None,\n make: bool=True, object: Union[name, List[name]]=None, sets: bool=True, shadow:\n bool=True, shapes: bool=True, transforms: bool=True, useActiveLights: bool=True,\n useActiveObjects: bool=True, q=True, query=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def nextLight():\n global light\n pin.setAllOutPinsLow()\n light += 1\n light %= len(traffic_lights)\n print traffic_colors[light]\n pin.setOutPinHigh(traffic_lights[light])", "async def lights(self, context):\n\n await random_image(context, 'lights')", "def _create_example_light():\n return Light({\"warning\": False, \"off\": True})", "def testLightImport(self):\n\n archive = IArchive(\"light1.abc\")\n emptyLightObj = ILight(archive.getTop(), \"emptyLight\" )\n lightObj = ILight(archive.getTop(), \"myLight\" )\n\n self.assertFalse(emptyLightObj.getSchema().getArbGeomParams().valid())\n self.assertFalse(emptyLightObj.getSchema().getUserProperties().valid())\n self.assertEqual(lightObj.getSchema().getArbGeomParams().getNumProperties(), 1)\n 
self.assertEqual(lightObj.getSchema().getUserProperties().getNumProperties(), 1)\n\n samp = lightObj.getSchema().getCameraSchema().getValue( 0 )\n window = samp.getScreenWindow();\n self.assertAlmostEqual( window['top'], 0.666666666666667 )\n self.assertAlmostEqual( window['bottom'], -0.666666666666667 )\n self.assertAlmostEqual( window['left'], -1.0 )\n self.assertAlmostEqual( window['right'], 1.0 )\n\n samp = lightObj.getSchema().getCameraSchema().getValue( 1 )\n window = samp.getScreenWindow();\n self.assertAlmostEqual( window['top'], -0.35 )\n self.assertAlmostEqual( window['bottom'], 0.75 )\n self.assertAlmostEqual( window['left'], 0.1 )\n self.assertAlmostEqual( window['right'], 0.5 )\n\n self.assertFalse(lightObj.getSchema().getCameraSchema().getChildBoundsProperty().valid())", "async def Turn_On_Lights() -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": \"green\",\n }", "def set_light(self, light, num=0):\r\n #TODO (pg) need MAXLIGHTS global variable, room for two now but shader\r\n # only uses 1.\r\n if num > 1 or num < 0:\r\n num = 0\r\n stn = 24 + num * 9\r\n self.unif[stn:(stn + 3)] = light.lightpos[0:3]\r\n self.unif[(stn + 3):(stn + 6)] = light.lightcol[0:3]\r\n self.unif[(stn + 6):(stn + 9)] = light.lightamb[0:3]", "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False", "def place_headlamp_light():\n\n lx = 1.0\n ly = light_height\n lz = 2.0\n #light_position = [lx, ly, lz, 1.0]\n light_position = [0.0, 0.0, 0.0, 1]\n light_ambient = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_diffuse = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_specular = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_direction = [1.0, -1.0, 1.0, 0.0] # Light points down\n # glViewport(0, 0, win_width, win_height)\n # glMatrixMode(GL_PROJECTION)\n # glLoadIdentity()\n # gluPerspective(40.0, float(win_width) / float(win_height), 0.01, 100.0)\n #\n # glMatrixMode(GL_MODELVIEW)\n # glLoadIdentity()\n # glPushMatrix()\n glLightfv(GL_LIGHT4, GL_POSITION, light_position)\n\n\n\n #glLightfv(GL_LIGHT4, GL_POSITION, (GLfloat * 4)(0.0, 0.0, 0.0, 1))\n glLightfv(GL_LIGHT4, GL_AMBIENT, light_ambient)\n glLightfv(GL_LIGHT4, GL_DIFFUSE, light_diffuse)\n glLightfv(GL_LIGHT4, GL_SPECULAR, light_specular)\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! 
Otherwise disabled\n # glLightf(GL_LIGHT1, GL_CONSTANT_ATTENUATION, 2.0)\n # glLightf(GL_LIGHT1, GL_LINEAR_ATTENUATION, 0.0)\n # glLightf(GL_LIGHT1, GL_QUADRATIC_ATTENUATION, 0.0)\n\n glLightf(GL_LIGHT4, GL_CONSTANT_ATTENUATION, 3.0)\n glLightf(GL_LIGHT4, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT4, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if headlamp_is_on:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 30.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT4, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 180.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n\n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n\n glEnable(GL_LIGHT4)\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx, ly, lz)\n glDisable(GL_LIGHTING)\n glColor3f(brightness, brightness, brightness)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def lighton(update: Update, context: CallbackContext) -> None:\n if __sauna.control.getPortValue(\"Light Sensor\") == 0:\n # TODO Mit Stromstossrelais ist dieser Code richtig\n # __sauna.control.togglePortValue(\"Light Switch\")\n update.message.reply_text(\"Light is on\")\n else:\n update.message.reply_text(\"Light was already on\")\n\n __sauna.control.setPortValue(\"Light Switch\")\n val = __sauna.control.getPortValue(\"Light Switch\")\n update.message.reply_text(\"Light Switch := \" + str(val))", "def set_lighting(self):\n lightPosition = [-1, 1, 1, 0]\n glLightfv(GL_LIGHT0, GL_POSITION, lightPosition)\n\n ambientLight = [1, 1, 0.4, 0.5]\n\n if self.lighting:\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)\n else:\n glDisable(GL_LIGHTING)\n glDisable(GL_LIGHT0)", "def set_lighting(self):\n lightPosition = [-1, 1, 1, 0]\n glLightfv(GL_LIGHT0, GL_POSITION, lightPosition)\n\n ambientLight = [0.1, 0.1, 0.1, 1]\n\n if self.lighting:\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, self.diffuse_light)\n else:\n glDisable(GL_LIGHTING)\n glDisable(GL_LIGHT0)", "def set_light_on(self):\r\n self._light = \"ON\"", "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, 
\"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, \"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. 
Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def gl_lighting():\n for viewer in nuke.allNodes('Viewer'):\n val = int(viewer.knob('gl_lighting').getValue())\n viewer.knob('gl_lighting').setValue(not val)", "def set_light_on(self):\n self._light = \"ON\"", "def place_red_light():\n glMatrixMode(GL_MODELVIEW)\n lx = 4.0\n ly = light_height\n lz = 2.0\n light_position = [lx, ly, lz, 1.0]\n lightr_ambient = [1.0, 0, 0, 1] # red\n lightb_diffuse = [0.4, 0.4, 0.6, 1] # blue\n lightb_specular = [0.0, 0, 0.8, 1] # blue\n light_direction = [1.0, -1.0, 1.0, 0.0] # Light points down\n\n\n # For Light 1 (red), set position, ambient, diffuse, and specular values\n glLightfv(GL_LIGHT1, GL_POSITION, light_position)\n glLightfv(GL_LIGHT1, GL_AMBIENT, lightr_ambient)\n glLightfv(GL_LIGHT1, GL_DIFFUSE, lightb_diffuse)\n glLightfv(GL_LIGHT1, GL_SPECULAR, lightb_specular)\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! 
Otherwise disabled\n glLightf(GL_LIGHT1, GL_CONSTANT_ATTENUATION, 2.0)\n glLightf(GL_LIGHT1, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT1, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if red_light:\n glLightf(GL_LIGHT1, GL_SPOT_CUTOFF, 45.0)\n glLightf(GL_LIGHT1, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT1, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT1, GL_SPOT_CUTOFF, 180.0)\n glLightf(GL_LIGHT1, GL_SPOT_EXPONENT, 0.0)\n\n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n\n glEnable(GL_LIGHT1)\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx, ly, lz)\n glDisable(GL_LIGHTING)\n glColor3f(brightness, 0, 0)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def initialize_lights(self):\n\t\tfor light in OUTPUT.LIGHTS:\n\t\t\tif light != -1:\n\t\t\t\tio.set_bit(light, 0)\n\t\tfor order in self.orderQueue.yield_orders(exclude=(None,)):\n\t\t\tself.set_button_light(order.floor, OUTPUT.IN_LIGHTS, 1)", "def set_button_light(self, floor, lights, value):\n\t\tif lights[floor] != -1:\n\t\t\tio.set_bit(lights[floor], value)", "def do_lights(self, line):\n if not self.huuey.issetup():\n print 'This session is not paired! Pair to a bridge first before continuing'\n return\n\n if len(self.huuey.lights) > 0:\n print 'List of Lights \\n'\n print 'ID\\tUnique'\n\n for index, light in enumerate(self.huuey.lights):\n print u\"{index}\\t{unique}\".format(index=index+1, unique=self.huuey.lights[light].uniqueid)\n\n else:\n if self.huuey.issetup():\n print 'No lights found on bridge'\n else:\n print 'Session not connected to bridge!'", "async def light_fixture(\n hass: HomeAssistant, mock_entry: MockEntityFixture, mock_light: Light\n):\n\n # disable pydantic validation so mocking can happen\n Light.__config__.validate_assignment = False\n\n light_obj = mock_light.copy(deep=True)\n light_obj._api = mock_entry.api\n light_obj.name = \"Test Light\"\n light_obj.is_light_on = False\n\n mock_entry.api.bootstrap.lights = {\n light_obj.id: light_obj,\n }\n\n await hass.config_entries.async_setup(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.LIGHT, 1, 1)\n\n yield (light_obj, \"light.test_light\")\n\n Light.__config__.validate_assignment = True", "def build_light(self, item):\n\n # Validete NMS object.\n if \"ObjectID\" not in item:\n return\n\n # Get object id from item.\n object_id = item[\"ObjectID\"]\n # Find light data\n if object_id not in self.lights_dictionary:\n return\n\n # Build Lights\n light_information = self.lights_dictionary[object_id]\n for idx, light_values in enumerate(light_information.values()):\n # Get Light Properties.\n light_type = light_values[\"type\"]\n light_location = light_values[\"location\"]\n\n # Create light.\n light = bpy.ops.object.light_add(\n type=light_type.upper(),\n location=light_location\n )\n light = bpy.context.object\n light[\"NMS_LIGHT\"] = True\n light.name = \"{0}_light{1}\".format(item.name, idx)\n data_copy = deepcopy(light_values)\n\n # Remove invalid blender properties.\n data_copy.pop(\"type\")\n data_copy.pop(\"location\")\n\n # Apply all other properties to blender object.\n for key, value in data_copy.items():\n if isinstance(value, list):\n value = mathutils.Vector(tuple(value))\n setattr(light.data, key, value)\n\n # Parent to object.\n 
utils.parent(light, item)\n\n # Disable Selection.\n light.hide_viewport = True\n light.hide_select = True", "def _create_light(knx_module: XKNX, config: ConfigType) -> XknxLight:\n group_address_tunable_white = None\n group_address_tunable_white_state = None\n group_address_color_temp = None\n group_address_color_temp_state = None\n if config[LightSchema.CONF_COLOR_TEMP_MODE] == ColorTempModes.absolute:\n group_address_color_temp = config.get(LightSchema.CONF_COLOR_TEMP_ADDRESS)\n group_address_color_temp_state = config.get(\n LightSchema.CONF_COLOR_TEMP_STATE_ADDRESS\n )\n elif config[LightSchema.CONF_COLOR_TEMP_MODE] == ColorTempModes.relative:\n group_address_tunable_white = config.get(LightSchema.CONF_COLOR_TEMP_ADDRESS)\n group_address_tunable_white_state = config.get(\n LightSchema.CONF_COLOR_TEMP_STATE_ADDRESS\n )\n\n return XknxLight(\n knx_module,\n name=config[CONF_NAME],\n group_address_switch=config[CONF_ADDRESS],\n group_address_switch_state=config.get(LightSchema.CONF_STATE_ADDRESS),\n group_address_brightness=config.get(LightSchema.CONF_BRIGHTNESS_ADDRESS),\n group_address_brightness_state=config.get(\n LightSchema.CONF_BRIGHTNESS_STATE_ADDRESS\n ),\n group_address_color=config.get(LightSchema.CONF_COLOR_ADDRESS),\n group_address_color_state=config.get(LightSchema.CONF_COLOR_STATE_ADDRESS),\n group_address_rgbw=config.get(LightSchema.CONF_RGBW_ADDRESS),\n group_address_rgbw_state=config.get(LightSchema.CONF_RGBW_STATE_ADDRESS),\n group_address_tunable_white=group_address_tunable_white,\n group_address_tunable_white_state=group_address_tunable_white_state,\n group_address_color_temperature=group_address_color_temp,\n group_address_color_temperature_state=group_address_color_temp_state,\n min_kelvin=config[LightSchema.CONF_MIN_KELVIN],\n max_kelvin=config[LightSchema.CONF_MAX_KELVIN],\n )", "def check_light(light: pykulersky.Light):\n light.connect()\n light.get_color()", "async def Turn_On_Lights_With_Color(\n color: str = Path(..., title=\"Color name or hexadecimal string\")\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS, color)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": color,\n }", "def testLighExport(self):\n\n archive = OArchive(\"light1.abc\")\n emptyLightObj = OLight(archive.getTop(), \"emptyLight\")\n lightObj = OLight(archive.getTop(), \"myLight\" )\n\n samp = CameraSample()\n lightObj.getSchema().setCameraSample( samp )\n\n samp = CameraSample( -0.35, 0.75, 0.1, 0.5 )\n lightObj.getSchema().getChildBoundsProperty().setValue(\n Box3d( V3d( 0.0, 0.1, 0.2 ), V3d( 0.3, 0.4, 0.5 ) ) )\n\n lightObj.getSchema().setCameraSample( samp )\n\n arg = lightObj.getSchema().getArbGeomParams()\n param = OFloatGeomParam( arg, \"test\", False, kConstantScope, 1 )\n user = lightObj.getSchema().getUserProperties()\n OFloatProperty( user, \"test\" )", "def toggle_lights(bridge):\n if check_any_light_on(bridge):\n turn_off_lights(bridge)\n else:\n turn_on_lights(bridge)", "def __init__(self, light, lights, settings):\n\n if 'name' in lights[light]:\n self.name = lights[light]['name']\n else:\n self.name = light\n if 'gpio' in lights[light]:\n self.gpio = lights[light]['gpio']\n else:\n self.gpio = 18 # GPIO pin 18 is the default for testing\n if 'on' in lights[light]:\n self.on = lights[light]['on']\n else:\n self.on = 'continuous'\n\n GPIO.setup(self.gpio, GPIO.OUT)\n if self.on == 'continuous':\n self.turn_on()\n else: # set up light on/off cyclying other than continuous\n pass # for example, during certain hours", "def 
enableLighting(self):\r\n\t\t\r\n\t\tglEnable(GL_LIGHTING)", "def on(\n id: int = typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.power_on())\n console.print(f\"[{ip}] Light {id} On:\\n{json.dumps(resp, indent=2)}\")", "def set_light_rgb(self, light, color):\n light_kwargs = { \"rgb_color\": color }\n if not self.use_current_brightness:\n light_kwargs[\"brightness\"] = 255\n self.turn_on(light, **light_kwargs)", "def place_blue_light():\n glMatrixMode(GL_MODELVIEW)\n lx = 3.0\n ly = light_height\n lz = 1.0\n light_position = [ lx, ly, lz, 1.0 ]\n\n lightb_ambient = [0.0, 0, 1, 1] #blue\n lightb_diffuse = [0.4, 0.4, 0.6, 1] #blue\n lightb_specular = [0.0, 0, 0.8, 1] #blue\n light_direction = [ 1.0, -1.0, 1.0, 0.0 ] # Light points down\n\n\n # For Light 0 (blue), set position, ambient, diffuse, and specular values\n glLightfv(GL_LIGHT0, GL_POSITION, light_position)\n glLightfv(GL_LIGHT0, GL_AMBIENT, lightb_ambient)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, lightb_diffuse)\n glLightfv(GL_LIGHT0, GL_SPECULAR, lightb_specular)\n\n\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! Otherwise disabled\n glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 1.0)\n glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT0, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if blue_light:\n glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 45.0)\n glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT0, GL_SPOT_CUTOFF,180.0)\n glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0.0)\n \n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n \n glEnable(GL_LIGHT0)\n\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx,ly,lz)\n glDisable(GL_LIGHTING)\n glColor3f(0, 0, brightness)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def test_02_Light(self):\n l_xml = self.m_xml.light_sect[1]\n print(PrettyFormatAny.form(l_xml, 'C3-02-A - XML'))\n l_device = self.m_device_obj\n l_light = deviceXML.read_base_device_object_xml(l_device, l_xml)\n print(PrettyFormatAny.form(l_light, 'C3-02-B - Light'))\n self.assertEqual(l_light.Name, TESTING_LIGHT_NAME_1)\n self.assertEqual(l_device.RoomName, TESTING_LIGHT_ROOM_NAME_1)\n self.assertEqual(l_light.UPBAddress, convert.dotted_hex2int(TESTING_INSTEON_ADDRESS_0))", "async def light(self) -> None:\n self.lit = True\n await self.run_command(\"miner fault_light on\")\n print(\"light \" + self.ip)", "def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )", "def __init__(self, LightFun):\n self.setParameters()\n self.Light = LightFun", "async def main():\n # Discover all bulbs in the network via broadcast datagram (UDP)\n # function takes the discovery object and returns a list with wizlight objects.\n# bulbs = await discovery.find_wizlights(discovery)\n # Print the IP address of the bulb on index 0\n# print(f\"Bulb IP address: {bulbs[0].ip}\")\n\n # Iterate over all returned bulbs\n# for bulb in bulbs:\n# print(bulb.__dict__)\n # Turn off all available bulbs\n # await bulb.turn_off()\n\n # Set up a standard light\n #light = wizlight(\"192.168.0.170\")\n 
light = wizlight(\"192.168.86.36\")\n # Set up the light with a custom port\n #light = wizlight(\"your bulb's IP address\", 12345)\n\n # The following calls need to be done inside an asyncio coroutine\n # to run them fron normal synchronous code, you can wrap them with\n # asyncio.run(..).\n\n # Turn on the light into \"rhythm mode\"\n# await light.turn_on(PilotBuilder())\n # Set bulb brightness\n# await light.turn_on(PilotBuilder(brightness = 255))\n\n # Set bulb brightness (with async timeout)\n timeout = 10\n await asyncio.wait_for(light.turn_on(PilotBuilder(brightness = 255)), timeout)\n\n # Set bulb to warm white\n await light.turn_on(PilotBuilder(warm_white = 255))\n\n # Set RGB values\n # red to 0 = 0%, green to 128 = 50%, blue to 255 = 100%", "async def test_light_setup(\n hass: HomeAssistant,\n light: tuple[Light, str],\n):\n\n unique_id = light[0].id\n entity_id = light[1]\n\n entity_registry = er.async_get(hass)\n entity = entity_registry.async_get(entity_id)\n assert entity\n assert entity.unique_id == unique_id\n\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_OFF\n assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION", "def main():\n\n # connect to the hue bridge\n bridge = phue.Bridge()\n bridge.connect() # throw an exception if connection was not established\n\n tracker = beat_tracker.BeatTracker()\n tracker.start()\n try:\n\n # obtain a list of lights to control\n lights = get_lights(bridge)\n\n x = 0\n ids = [l.light_id for l in lights]\n\n while True:\n\n time_between_beats = (60.0 / tracker.tempo)\n\n combos = [\n [1, 0],\n [1, 254],\n [1, 0],\n [500, 254],\n ]\n x = (x + 1) % 4\n\n temp, _brightness = combos[x]\n\n adjust = int(_brightness * (int(tracker.volume / 1500.0) * 2))\n\n if tracker.volume < 1000:\n adjust = 0\n\n brightness = int(min(adjust, 254))\n on = bool(tracker.volume > 800)\n command = {\"ct\": temp, \"bri\": brightness, \"transitiontime\": 1, \"on\": on}\n bridge.set_light(ids, command)\n\n if time_between_beats > 1:\n time.sleep(1)\n else:\n time.sleep(time_between_beats)\n\n finally:\n tracker.stop()", "def turnLightOn(ID):\n dislin.litmod(ID, 'ON')", "async def light_fixture(\n hass: HomeAssistant,\n mock_entry: MockEntityFixture,\n mock_light: Light,\n camera: Camera,\n):\n\n # disable pydantic validation so mocking can happen\n Light.__config__.validate_assignment = False\n\n light_obj = mock_light.copy(deep=True)\n light_obj._api = mock_entry.api\n light_obj.name = \"Test Light\"\n light_obj.camera_id = None\n light_obj.light_mode_settings.mode = LightModeType.MOTION\n light_obj.light_mode_settings.enable_at = LightModeEnableType.DARK\n\n mock_entry.api.bootstrap.reset_objects()\n mock_entry.api.bootstrap.cameras = {camera.id: camera}\n mock_entry.api.bootstrap.lights = {\n light_obj.id: light_obj,\n }\n\n await hass.config_entries.async_reload(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.SELECT, 6, 6)\n\n yield light_obj\n\n Light.__config__.validate_assignment = True", "def _set_backpack_led(self, msg):\n # setup color as integer values\n color = [int(x * 255) for x in [msg.r, msg.g, msg.b, msg.a]]\n # create lights object with duration\n light = cozmo.lights.Light(cozmo.lights.Color(rgba=color), on_period_ms=1000)\n # set lights\n self._cozmo.set_all_backpack_lights(light)", "def change_lights_color(self, entity, attribute, oldUrl, newUrl, kwargs):\n if newUrl != oldUrl and newUrl is not None and self.can_change_colors():\n rgb_colors = 
self.get_colors(self.format_ha_url(newUrl))\n for i in range(len(self.lights)):\n threading.Thread(target=self.set_light_rgb, args=(self.lights[i], rgb_colors[i])).start()", "def add_multiple_lights(properties,object,dist,numLight,gravity=[0,0,-9.81],tgt=None,color=[1.,1.,1.], \\\n spotlight=False,radius=15.,falloff=20.,tightness=10., \\\n area=0.,sample=9,adaptive=True,jitter=True): \n #normalize gravity\n g=op.mul(gravity,-1/op.norm(gravity))\n \n #compute frame\n gabs=[abs(gi) for gi in g]\n id=gabs.index(min(gabs))\n t0=[1. if i==id else 0. for i in range(3)]\n t1=op.cross(t0,g)\n t1=op.mul(t1,1/op.norm(t1))\n t0=op.cross(t1,g)\n \n #find highest direction\n bb=compute_bb(object)\n ctr=op.mul(op.add(bb[0],bb[1]),0.5)\n distg=sum([abs((bb[1][i]-bb[0][i])/2*g[i]) for i in range(3)])\n \n #add each light\n for i in range(numLight):\n angle=math.pi*2*i/numLight\n d0=op.mul(g,distg)\n d1=op.mul(t0,math.sin(angle)*dist)\n d2=op.mul(t1,math.cos(angle)*dist)\n add_light(properties,op.add(d0,op.add(d1,d2)),ctr,color,\n spotlight,radius,falloff,tightness,area,sample,adaptive,jitter)", "def test_04_Light(self):\n l_xml = self.m_xml.light\n l_device = self.m_device_obj\n l_light = deviceXML.read_base_device_object_xml(l_device, l_xml)\n # print(PrettyFormatAny.form(l_light, 'C4-04-A - Light'))\n self.assertEqual(l_light.Name, TESTING_LIGHT_NAME_0)\n self.assertEqual(l_device.RoomName, TESTING_LIGHT_ROOM_NAME_0)", "def set_red_light(self, value):\n self.diffuse_light[0] = value\n self.redraw()", "def main():\n # Parse arguments for configuration and light type\n parser = argparse.ArgumentParser()\n parser.add_argument(\"light_type\", help=\"lifx or hue\", choices=['lifx', 'hue'], type = str.lower)\n parser.add_argument(\"-c\", \"--config_mode\", action='store_true', help=\"runs the client in config mode which prints out the light data\")\n \n args = parser.parse_args()\n \n config_mode = args.config_mode\n light_type = args.light_type\n \n # Get light information \n # *Note*\n # Only LIFX is supported at this point in time\n light_service = None\n if light_type == 'lifx':\n light_service = lightservice.LIFXLightService(\"https://api.lifx.com/v1/\")\n \n data = light_service.refresh_light_data(config_mode)\n \n button_handler = None\n if config_mode:\n button_handler = buttonhandler.ConfigButtonHandler()\n button_handler.start()\n else:\n button_handler = buttonhandler.ButtonHandler(data)\n button_handler.start(light_service)", "def get_rgb_light():\n return list(light.rgb())", "def set_light_color(self, light_color):\n\n self.light_color = light_color", "def __init__(self,\r\n lightpos=(10, -10, 20),\r\n lightcol=(1.0, 1.0, 1.0),\r\n lightamb=(0.1, 0.1, 0.2)):\r\n super(Light, self).__init__()\r\n self.lightpos = lightpos\r\n self.lightcol = lightcol\r\n self.lightamb = lightamb", "def set_light(self, idx, light):\n\n # Don't set a light that doesn't need its thing set\n if self.application.settings[\"lights_state\"][idx] == light:\n return\n\n # synchronize our internal representation of the lights\n self.application.settings[\"lights_state\"][idx] = light\n\n packed_cmd = srsly.pack_light_data(idx, light)\n srsly.write_light_cmd(\n self.application.settings['serial_connection'],\n packed_cmd,\n sleep=self.application.settings[\"refresh_rate\"])", "def action_on_light_by_id(bridge, light_id, action):\n if action == 'on':\n bridge.set_light(light_id, 'on', True)\n elif action == 'off':\n bridge.set_light(light_id, 'on', False)\n elif action == 'toggle':\n current_state = bridge.get_light(light_id, 
'on')\n bridge.set_light(light_id, 'on', not current_state)\n click.secho(\n 'Turning %s light %s!' % (bridge.get_light(light_id, 'name'),\n get_state(not current_state)),\n fg='green')\n\n return", "def setLight(self, id, position, diffuse, specular, ambient):\r\n\t\t\r\n\t\tself.lights[id].set(position, diffuse, specular, ambient)", "def init_custom_lights(fig_axis, prescale=None):\n epsilon = 0.1 # a factor which forces the positions of the light faces to be close to the intersection\n\n lights_data = []\n\n node_id = 53119168\n\n try:\n out_vectors = np.array(nav.determine_pedigree(node_id))\n except NetworkXNoPath or ValueError:\n raise('Could not determine pedigree for light at node {}'.format(node_id))\n\n degree = len(out_vectors)\n x, y = nav.get_position_of_node(node_id)\n go = [False, True] * degree * 2\n go = go[:degree]\n\n light = {'object': 'light',\n 'node': node_id,\n 'degree': degree,\n 'x': x,\n 'y': y,\n 'switch-counter': 0,\n 'switch-time': models.determine_traffic_light_timer()\n }\n\n light['out-xpositions'] = [x + epsilon * out_vectors[j][0] for j in range(light['degree'])]\n light['out-ypositions'] = [y + epsilon * out_vectors[j][1] for j in range(light['degree'])]\n light['out-xvectors'] = [out_vectors[j][0] for j in range(light['degree'])]\n light['out-yvectors'] = [out_vectors[j][1] for j in range(light['degree'])]\n light['go-values'] = np.array([go[j] for j in range(light['degree'])])\n\n lights_data.append(light)\n\n lights = pd.DataFrame(lights_data)\n\n # determine binning and assign bins to lights\n lights['xbin'], lights['ybin'] = models.determine_bins(fig_axis, lights)\n\n # print('Number of traffic lights: {}'.format(len(lights)))\n return lights", "def add_light(self, light):\n # convert from a vtk type if applicable\n if isinstance(light, _vtk.vtkLight) and not isinstance(light, pyvista.Light):\n light = pyvista.Light.from_vtk(light)\n\n if not isinstance(light, pyvista.Light):\n raise TypeError(f'Expected Light instance, got {type(light).__name__} instead.')\n self._lights.append(light)\n self.AddLight(light)\n self.Modified()\n\n # we add the renderer to add/remove the light actor if\n # positional or cone angle is modified\n light.add_renderer(self)", "async def add_lights(self, context):\n if str(context.author.id) in BANNED_USERS:\n await context.send(f'I can\\'t do that, {context.author.mention}')\n return print(f'{TIME}: {context.author} failed to add image Banned.')\n\n await add_image(context, 'lights')", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config[CONF_NAME]\n host = config[CONF_HOST]\n entity = OppleLight(name, host)\n\n add_entities([entity])\n\n _LOGGER.debug(\"Init light %s %s\", host, entity.unique_id)", "async def test_color_light(\n hass: HomeAssistant, bulb: MagicMock, transition: float | None\n) -> None:\n already_migrated_config_entry = MockConfigEntry(\n domain=DOMAIN, data={}, unique_id=MAC_ADDRESS\n )\n already_migrated_config_entry.add_to_hass(hass)\n bulb.color_temp = None\n with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):\n await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})\n await hass.async_block_till_done()\n\n entity_id = \"light.my_bulb\"\n KASA_TRANSITION_VALUE = transition * 1_000 if transition is not None else None\n\n BASE_PAYLOAD = {ATTR_ENTITY_ID: entity_id}\n if transition:\n BASE_PAYLOAD[ATTR_TRANSITION] = transition\n\n state = 
hass.states.get(entity_id)\n assert state.state == \"on\"\n attributes = state.attributes\n assert attributes[ATTR_BRIGHTNESS] == 128\n assert attributes[ATTR_COLOR_MODE] == \"hs\"\n assert attributes[ATTR_SUPPORTED_COLOR_MODES] == [\"brightness\", \"color_temp\", \"hs\"]\n assert attributes[ATTR_MIN_MIREDS] == 111\n assert attributes[ATTR_MAX_MIREDS] == 250\n assert attributes[ATTR_HS_COLOR] == (10, 30)\n assert attributes[ATTR_RGB_COLOR] == (255, 191, 178)\n assert attributes[ATTR_XY_COLOR] == (0.42, 0.336)\n\n await hass.services.async_call(\n LIGHT_DOMAIN, \"turn_off\", BASE_PAYLOAD, blocking=True\n )\n bulb.turn_off.assert_called_once_with(transition=KASA_TRANSITION_VALUE)\n\n await hass.services.async_call(LIGHT_DOMAIN, \"turn_on\", BASE_PAYLOAD, blocking=True)\n bulb.turn_on.assert_called_once_with(transition=KASA_TRANSITION_VALUE)\n bulb.turn_on.reset_mock()\n\n await hass.services.async_call(\n LIGHT_DOMAIN,\n \"turn_on\",\n {**BASE_PAYLOAD, ATTR_BRIGHTNESS: 100},\n blocking=True,\n )\n bulb.set_brightness.assert_called_with(39, transition=KASA_TRANSITION_VALUE)\n bulb.set_brightness.reset_mock()\n\n await hass.services.async_call(\n LIGHT_DOMAIN,\n \"turn_on\",\n {**BASE_PAYLOAD, ATTR_COLOR_TEMP_KELVIN: 6666},\n blocking=True,\n )\n bulb.set_color_temp.assert_called_with(\n 6666, brightness=None, transition=KASA_TRANSITION_VALUE\n )\n bulb.set_color_temp.reset_mock()\n\n await hass.services.async_call(\n LIGHT_DOMAIN,\n \"turn_on\",\n {**BASE_PAYLOAD, ATTR_COLOR_TEMP_KELVIN: 6666},\n blocking=True,\n )\n bulb.set_color_temp.assert_called_with(\n 6666, brightness=None, transition=KASA_TRANSITION_VALUE\n )\n bulb.set_color_temp.reset_mock()\n\n await hass.services.async_call(\n LIGHT_DOMAIN,\n \"turn_on\",\n {**BASE_PAYLOAD, ATTR_HS_COLOR: (10, 30)},\n blocking=True,\n )\n bulb.set_hsv.assert_called_with(10, 30, None, transition=KASA_TRANSITION_VALUE)\n bulb.set_hsv.reset_mock()", "def __init__(\n self,\n hass,\n cl,\n name,\n lights_ct,\n lights_rgb,\n lights_xy,\n lights_brightness,\n disable_brightness_adjust,\n min_brightness,\n max_brightness,\n sleep_entity,\n sleep_state,\n sleep_colortemp,\n sleep_brightness,\n disable_entity,\n disable_state,\n initial_transition,\n ):\n self.hass = hass\n self._cl = cl\n self._name = name\n self._entity_id = \"switch.\" + slugify(f\"circadian_lighting {name}\")\n self._state = None\n self._icon = ICON\n self._hs_color = None\n self._lights_ct = lights_ct\n self._lights_rgb = lights_rgb\n self._lights_xy = lights_xy\n self._lights_brightness = lights_brightness\n self._disable_brightness_adjust = disable_brightness_adjust\n self._min_brightness = min_brightness\n self._max_brightness = max_brightness\n self._sleep_entity = sleep_entity\n self._sleep_state = sleep_state\n self._sleep_colortemp = sleep_colortemp\n self._sleep_brightness = sleep_brightness\n self._disable_entity = disable_entity\n self._disable_state = disable_state\n self._initial_transition = initial_transition\n self._attributes = {\"hs_color\": self._hs_color, \"brightness\": None}\n\n self._lights = lights_ct + lights_rgb + lights_xy + lights_brightness\n\n # Register callbacks\n dispatcher_connect(hass, CIRCADIAN_LIGHTING_UPDATE_TOPIC, self.update_switch)\n track_state_change(hass, self._lights, self.light_state_changed)\n if self._sleep_entity is not None:\n track_state_change(hass, self._sleep_entity, self.sleep_state_changed)\n if self._disable_entity is not None:\n track_state_change(hass, self._disable_entity, self.disable_state_changed)", "async def 
test_light_turn_on(\n hass: HomeAssistant,\n light: tuple[Light, str],\n):\n\n entity_id = light[1]\n light[0].__fields__[\"set_light\"] = Mock()\n light[0].set_light = AsyncMock()\n\n await hass.services.async_call(\n \"light\",\n \"turn_on\",\n {ATTR_ENTITY_ID: entity_id, ATTR_BRIGHTNESS: 128},\n blocking=True,\n )\n\n light[0].set_light.assert_called_once_with(True, 3)", "async def Rainbow_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0)\n) -> Dict[str, Any]:\n\n busylightapi.manager.apply_effect_to_light(light_id, rainbow)\n return {\n \"action\": \"effect\",\n \"name\": \"rainbow\",\n \"light_id\": light_id,\n }", "def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 256}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)", "def lightPath(self):\n\t\treturn mfl.mayaFile( self._path + '/lights.ma' )", "def __init__(self):\n GPIO.setmode(GPIO.BOARD)\n for light in self.all:\n GPIO.setup(light, GPIO.OUT)", "def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 32}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)", "def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 64}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)", "def setLightSwitch(self, _state=False):\n if _state == True:\n render.setLight(self.lightNP)\n elif _state == False:\n render.clearLight(self.lightNP)", "def lightsOn(strip, interval):\r\n clearStrip(strip)\r\n print(\"lightsOn\", strip, interval)\r\n fade(LED_COLOR_OFF, LED_COLOR_FULL, STEPS, interval, strip)", "async def Blink_Light_With_Color_and_Speed(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n color: str = Path(..., title=\"Color name or hexadecimal string\"),\n speed: BlinkSpeed = Path(..., title=\"Speed: slow, medium, fast\"),\n) -> Dict[str, Any]:\n busylightapi.manager.light_blink(light_id, color, speed)\n return {\n \"action\": \"blink\",\n \"light_id\": light_id,\n \"color\": color,\n \"speed\": speed,\n }", "async def test_light_state(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n await init_integration(hass, aioclient_mock)\n\n entity_registry = er.async_get(hass)\n\n state = hass.states.get(\"light.modernformsfan_light\")\n assert state\n assert state.attributes.get(ATTR_BRIGHTNESS) == 128\n assert state.attributes.get(ATTR_FRIENDLY_NAME) == \"ModernFormsFan Light\"\n assert state.state == STATE_ON\n\n entry = entity_registry.async_get(\"light.modernformsfan_light\")\n assert entry\n assert entry.unique_id == \"AA:BB:CC:DD:EE:FF\"", "def __init__(self, device: SensemeDevice) -> None:\n super().__init__(device, f\"{device.name} Light\")\n self._attr_supported_color_modes = {ColorMode.COLOR_TEMP}\n self._attr_color_mode = ColorMode.COLOR_TEMP\n self._attr_min_mireds = color_temperature_kelvin_to_mired(\n device.light_color_temp_max\n )\n self._attr_max_mireds = color_temperature_kelvin_to_mired(\n device.light_color_temp_min\n )", "def configure_light(self, number: str, subtype: str, config: LightConfig,\n platform_settings: dict) -> \"LightPlatformInterface\":\n raise NotImplementedError", "def flicker_lights(self):\n if self.lighting:\n 
self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def flicker_lights(self):\n if self.lighting:\n self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def _activate_leds(self):\n self.leds = [self.getLED('led' + str(i)) for i in range(self.num_leds)]\n self.green_led = self.getLED('led8')\n self.front_led = self.getLED('led9')\n self.front_led.set(1)", "def place_green_light():\n glMatrixMode(GL_MODELVIEW)\n lx = 5.0\n ly = light_height\n lz = 3.0\n light_position = [lx, ly, lz, 1.0]\n lightg_ambient = [0, 1.0, 0, 1] # green\n lightb_diffuse = [0.4, 0.4, 0.6, 1] # blue\n lightb_specular = [0.0, 0, 0.8, 1] # blue\n light_direction = [1.0, -1.0, 1.0, 0.0] # Light points down\n\n # For Light 2 (green), set position, ambient, diffuse, and specular values\n glLightfv(GL_LIGHT2, GL_POSITION, light_position)\n glLightfv(GL_LIGHT2, GL_AMBIENT, lightg_ambient)\n glLightfv(GL_LIGHT2, GL_DIFFUSE, lightb_diffuse)\n glLightfv(GL_LIGHT2, GL_SPECULAR, lightb_specular)\n\n glLightf(GL_LIGHT2, GL_CONSTANT_ATTENUATION, 3.0)\n glLightf(GL_LIGHT2, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT2, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if green_light:\n glLightf(GL_LIGHT2, GL_SPOT_CUTOFF, 45.0)\n glLightf(GL_LIGHT2, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT2, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT2, GL_SPOT_CUTOFF, 180.0)\n glLightf(GL_LIGHT2, GL_SPOT_EXPONENT, 0.0)\n\n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n\n\n glEnable(GL_LIGHT2)\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx, ly, lz)\n glDisable(GL_LIGHTING)\n glColor3f(0, brightness, 0)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()", "def lights(self):\n return list(self.GetLights())", "def new_light():\n name = request.args.get('name')\n types = request.args.get('channels').split(\",\")\n all_lights[name] = types\n print name, all_lights[name]\n with open('data/lights.json', 'w') as f:\n f.write(json.dumps(all_lights))\n return json_back()\n return \"ERROR\"", "def add_light(self, name, light):\n if isinstance(light, AmbientLight):\n raise ValueError('Set ambient light with set_ambient_light(), not with add_light()')\n if len(self._lights) == MAX_N_LIGHTS:\n raise ValueError('The maximum number of lights in a scene is capped at {}'.format(MAX_N_LIGHTS))\n if not isinstance(light, PointLight) and not isinstance(light, DirectionalLight):\n raise ValueError('Scene only supports PointLight and DirectionalLight types')\n self._lights[name] = light", "async def light_node_fixture(\n hass: HomeAssistant, matter_client: MagicMock\n) -> MatterNode:\n return await setup_integration_with_node_fixture(\n hass, \"extended-color-light\", matter_client\n )", "def get_light():\n return 'do some magic!'", "def yieldLight( self ) -> str:\n\t\tself.lightIndex += 1\n\n\t\tlights = Controller.ALL_KNOWN_TRAFFIC_LIGHTS + Controller.TRAIN_PHANTOM_LIGHTS\n\n\t\tif self.lightIndex == len( lights ):\n\t\t\tself.lightIndex = 0\n\n\n\t\treturn lights[self.lightIndex]", "def set_light_callback(self, direction, floor, value):\n\t\tif direction == ORDERDIR.UP:\n\t\t\tlights = OUTPUT.UP_LIGHTS\n\t\telif direction == ORDERDIR.DOWN:\n\t\t\tlights = OUTPUT.DOWN_LIGHTS\n\t\tself.set_button_light(floor, lights, value)", "def light_sync(self):", "def turn_on(self):\n 
GPIO.output(self.gpio, True) # turn on light", "def getLight(self):\n return self.light" ]
[ "0.7286741", "0.6968058", "0.667978", "0.6516438", "0.6463352", "0.6461872", "0.6437315", "0.6400645", "0.63854384", "0.63444823", "0.6283221", "0.62622285", "0.6234883", "0.61583436", "0.61548394", "0.614277", "0.6137061", "0.61298877", "0.61078626", "0.61018175", "0.6089106", "0.6081094", "0.6052358", "0.60355777", "0.59865034", "0.5972309", "0.5947632", "0.59467125", "0.5939675", "0.5898696", "0.58790684", "0.58778113", "0.5866109", "0.5855703", "0.58403975", "0.58388644", "0.58162796", "0.58145565", "0.5795281", "0.57775235", "0.5770188", "0.57486385", "0.5745383", "0.5744481", "0.5737615", "0.57160723", "0.5702036", "0.5698924", "0.5679039", "0.5664426", "0.5664424", "0.5650175", "0.5649477", "0.5632964", "0.5625747", "0.5603573", "0.5582914", "0.55766624", "0.5571745", "0.5568195", "0.5560409", "0.5558972", "0.55580574", "0.55491424", "0.55484736", "0.5535188", "0.55323064", "0.5530735", "0.5527514", "0.551717", "0.5515682", "0.5512046", "0.5508987", "0.5507501", "0.549507", "0.5472755", "0.5470072", "0.5458407", "0.5457196", "0.54525924", "0.5443126", "0.5440174", "0.54358166", "0.54257566", "0.54203826", "0.54194844", "0.5413711", "0.5413711", "0.54124385", "0.5404282", "0.5398864", "0.5386157", "0.5380188", "0.53785723", "0.53674674", "0.53584135", "0.53396416", "0.5337005", "0.5336196", "0.5328716" ]
0.70111126
1
filter light linking data for the specific asset
def filterLightLinksData(self, LayersInfo , asset, sAr = ['',''] ): lightData = [(a.replace( sAr[0], sAr[1] ),LayersInfo[a].replace( sAr[0], sAr[1] )) for a in LayersInfo.keys() if asset in a] return dict( lightData )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importLightLinking(self, asset = '', searchAndReplace = ['',''] ):\n\t\tLayersInfo = pickle.load( open( self.lightLinkPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tif not asset == '':\n\t\t\tLayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )\n\t\tfor l in LayersInfo.keys():\n\t\t\tobjsToBreakLink = []\n\t\t\tfor link in LayersInfo[l]:\n\t\t\t\tif mc.objExists( link ):\n\t\t\t\t\tobjsToBreakLink.append( link )\n\t\t\tmc.lightlink( b = True, light = l, o = objsToBreakLink )\n\t\tmc.refresh( su = 0 )", "def filter(self, filters):", "def get_filters(self):", "def filterToLight( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n lit = int(255*HSL[2]) # convert to 0-255 range\n bmp.pixels[h][w] = (lit,lit,lit)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def apply_filter(self, image):\n pass", "def filter(self, drawable):\n pass", "def broadbandfilter(self):\n _, = self.broadbandfilters\n return _", "def exportLightLinking(self):\n\t\tlights = [a for a in mc.ls( typ = ['light','aiAreaLight'] ) if not 'eye' in a]\n\t\tallShapes = [s for s in mc.ls( type = 'geometryShape', ni = 1) if not (mc.objectType( s ) in ( 'aiAreaLight','aiSkyDomeLight' ))]\n\t\tlitLinks = {}\n\t\tfor l in lights:\n\t\t\tlightLinkShapes = mc.lightlink( query=True, light=l ,shp=1,t=0,set=0,h=0)\n\t\t\tlitLinks[l]\t = list( set( allShapes ) - set( lightLinkShapes ) )#SHAPES WITH NO LINK TO THIS LIGHT\n\t\tpickle.dump( litLinks, open( self.lightLinkPath.path, \"wb\" ) )", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def healthcare_filter(df_all): \n #get requested assets under healthcare tag \n df_filtered = pandas.DataFrame(columns=['osm_id','asset','geometry']) #create df for saving data\n for row in range(len(df_all.index)): \n if 'healthcare' in df_all[\"asset\"][row]: #check if healthcare key is present\n df_filtered = df_filtered.append(df_all.loc[row]) #if so, save in df \n if '\"healthcare\"=>\"doctor\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'doctors' #to be consistent with asset list \n elif '\"healthcare\"=>\"pharmacy\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'pharmacy'\n elif '\"healthcare\"=>\"hospital\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'hospital'\n elif '\"healthcare\"=>\"clinic\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'clinic'\n elif '\"healthcare\"=>\"dentist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'dentist'\n elif '\"healthcare\"=>\"physiotherapist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'physiotherapist'\n elif '\"healthcare\"=>\"alternative\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'alternative'\n elif '\"healthcare\"=>\"laboratory\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'laboratory'\n elif '\"healthcare\"=>\"optometrist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'optometrist'\n elif '\"healthcare\"=>\"rehabilitation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'rehabilitation'\n elif '\"healthcare\"=>\"blood_donation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'blood_donation'\n elif '\"healthcare\"=>\"birthing_center\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'birthing_center'\n else:\n df_filtered = df_filtered.drop(index=row)\n \n return 
df_filtered", "def process_asset_data(data):\n buffered_assets = []\n\n for asset in data:\n asset_geom = shape(asset['geometry'])\n buffered_geom = asset_geom.buffer(100)\n\n asset['buffer'] = buffered_geom\n buffered_assets.append(asset)\n\n output = []\n assets_seen = set()\n\n for asset in tqdm(buffered_assets):\n if asset['properties']['Opref'] in assets_seen:\n continue\n assets_seen.add(asset['properties']['Opref'])\n touching_assets = []\n for other_asset in buffered_assets:\n if asset['buffer'].intersects(other_asset['buffer']):\n touching_assets.append(other_asset)\n assets_seen.add(other_asset['properties']['Opref'])\n\n dissolved_shape = cascaded_union([a['buffer'] for a in touching_assets])\n final_centroid = dissolved_shape.centroid\n output.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [final_centroid.coords[0][0], final_centroid.coords[0][1]],\n },\n 'properties':{\n 'name': asset['properties']['name'],\n }\n })\n\n return output", "def specific_asset(self, asset: str) -> dict:\n \n specific_asset_url = self.network + bf_assets_url + asset\n\n response = query_blockfrost(specific_asset_url, self.api_key, self.proxies)\n \n return response", "def __apply_filters(url, dataset_code):\n if '?' not in url:\n url += '?'\n else:\n url += '&'\n for key in dataset_code.FILTERS:\n if isinstance(dataset_code.FILTERS[key], list):\n for value in dataset_code.FILTERS[key]:\n url += key + '=' + str(value) + '&'\n else:\n url += key + '=' + str(dataset_code.FILTERS[key]) + '&'\n url = url[0:-1]\n return url", "def reference_filters(self, version, options):\n pass", "def filters(im, detail=False, sharpen=False, **kwargs):\n filters = []\n if detail:\n filters.append(('detail', True))\n if sharpen:\n filters.append(('sharpen', True))\n return im", "def get_data_filter(args):\n diff_data(args, \"filter\")", "def crossWalkGeoBlacklight(data):\n\n dataJsonObj=deep_get(data,\"xml.fgdc\",[])\n if len (dataJsonObj)>0:\n dataJsonObj=deep_get(dataJsonObj[0],\"data\",{})\n else:\n dataJsonObj={}\n layername=os.path.splitext(os.path.basename(data['file']))[0]\n geoserver_layername = data['geoserverStoreName']\n gblight = assignMetaDataComponents(dataJsonObj,layername,geoserver_layername,data[\"resource_type\"])\n gblight['solr_geom']=data['bounds']\n data['geoblacklightschema']=gblight\n return data", "async def filter(self, **kwargs):\n\n pass", "def planes_with_light_profiles(tracer):\n # NOTE: Find all planes with light profiles\n # NOTE:\n # # image = tracer.galaxies[1].profile_image_from_grid(grid=grid)\n # # plt.figure()\n # # plt.imshow(image.in_2d)\n # # plt.show()\n #\n # # asd = list(map(lambda plane: plane.has_light_profile, tracer.planes))\n # # print(asd)\n # #print(tracer.planes)\n #\n # #print(tracer.has_light_profile)\n # #print(list(map(lambda plane: plane.has_light_profile, tracer.planes)))\n # #print(tracer.galaxies_with_light_profile)\n #\n # #print(tracer.planes[1].galaxies_with_light_profile)\n #\n # galaxies = tracer.planes[1].galaxies_with_light_profile\n # galaxy = galaxies[0]\n #\n # galaxy_light_profiles = galaxy.light_profiles\n #\n # image_0 = galaxy_light_profiles[0].profile_image_from_grid(grid=grid)\n # image_0_in_2d = image_0.in_2d\n #\n # image_1 = galaxy_light_profiles[1].profile_image_from_grid(grid=grid)\n # image_1_in_2d = image_1.in_2d", "def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data", "def importLights(self, asset = '', searchAndReplace = ['',''] ):\n\t\tif 
self.lightPath.exists:\n\t\t\tself.lightPath.imp()\n\t\t\tif self.lightLinkPath.exists:\n\t\t\t\tself.importLightLinking( asset, searchAndReplace )", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')", "def filter(self, *args, **kwargs):", "async def add(self, ctx, *, link):\r\n try: # compatability with older versions\r\n self.adkillr[ctx.message.server.id]['filters'].append(link)\r\n except KeyError:\r\n self.adkillr[ctx.message.server.id]['filters'] = [link]\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)\r\n await self.bot.say(\"Filter added.\")", "async def low_pass(\n client,\n event,\n smoothing: P('float', 'smoothing', min_value = 0.0, max_value = 5.0),\n):\n player = get_player_or_abort(client, event)\n \n filter = LowPass(smoothing)\n player.add_filter(filter)\n await player.apply_filters()\n \n return create_filter_added_embed(filter)", "def lightlink(*args, b: bool=True, hierarchy: bool=True, light: Union[name, List[name]]=None,\n make: bool=True, object: Union[name, List[name]]=None, sets: bool=True, shadow:\n bool=True, shapes: bool=True, transforms: bool=True, useActiveLights: bool=True,\n useActiveObjects: bool=True, q=True, query=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def filter_items(self, context, data, propname):\n\n helper_funcs = bpy.types.UI_UL_list\n\n items = getattr(data, propname)\n\n # Filtering by name\n filtered = helper_funcs.filter_items_by_name(\n self.filter_name, self.bitflag_filter_item, items, \"name\", reverse=False\n )\n\n if not filtered:\n filtered = [self.bitflag_filter_item] * len(items)\n\n d = context.active_object.data\n anim_ret = context.active_object.anim_ret\n\n for index, bone in enumerate(items):\n excluded = False\n found = False\n\n anim_ret_bone = bone.anim_ret_bone\n\n if not anim_ret_bone:\n excluded = True\n if not excluded and anim_ret_bone.source_bone_name == \"\":\n excluded = True\n if bone.name.startswith(ObjectAnimRet.prefix):\n excluded = True\n if not excluded and not anim_ret.show_def and \"DEF-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_mch and \"MCH-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_org and \"ORG-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_fk and \"fk\" in bone.name.lower():\n excluded = True\n if not excluded and not anim_ret.show_ik and \"ik\" in bone.name.lower():\n excluded = True\n if not excluded and anim_ret.filter_layers:\n data_bone = d.bones[bone.name]\n for layer_id, layer in enumerate(d.layers):\n if layer:\n if data_bone.layers[layer_id]:\n found = True\n break\n\n if excluded or not found:\n filtered[index] &= ~self.bitflag_filter_item\n\n ordered = []\n\n # Reorder by name or average weight.\n if self.use_filter_sort_alpha:\n sort = [(idx, getattr(it, \"name\", \"\")) for idx, it in enumerate(items)]\n\n ordered = helper_funcs.sort_items_helper(sort, lambda e: e[1].lower())\n\n return filtered, ordered", "def use_effect(self):\n if self.preview_name in FILTERS:\n photo = 
Image.open(self.path.url[1:])\n preview = photo.filter(FILTERS.get(self.preview_name))\n preview.save(self.path.url[1:])", "def pwgrwlfilter(self):\n return None", "def lights(self) -> List[dict]:\n return self.items_by_domain(\"light\")", "def filter_data(article):\n filtered = {\n 'id': article['id'],\n 'title': article['title'],\n 'perex': article['perex'],\n 'body': article['body'],\n 'author': article['author'].get('name', None) \n if article['author'] is not None \n else None,\n 'image': get_image(article),\n 'source': article['source']['name'],\n 'label': article['label']\n }\n\n return filtered", "def filter(data_raw: dict, sigma: int=1) -> dict:\n data = Filter.__band_filter(data_raw, lowFreq=2, highFreq=70, filterType='bandstop')\n data = Filter.__laplacian_filter(data,sigma) #Need to write test for this once its complete\n return data", "def filter_array(image: Image, filter_id: str) -> Image:\n \n if filter_id == \"3\":\n image = three_tone(image,\"aqua\",\"blood\",\"lemon\")\n elif filter_id == \"X\":\n image = extreme_contrast(image)\n elif filter_id == \"T\":\n image = sepia_filter(image)\n elif filter_id == \"P\":\n image = posterize(image)\n elif filter_id == \"E\":\n image = detect_edges(image,15)\n elif filter_id == \"V\":\n image = flip_vertical(image)\n elif filter_id == \"H\":\n image = flip_horizontal(image)\n \n return image", "def light(brightness, filter):\n brightness = clamp(MIN_BRIGHTNESS, round(brightness), MAX_BRIGHTNESS)\n for col in range(DISPLAY_WIDTH):\n for row in range(DISPLAY_HEIGHT):\n if filter(col, row):\n microbit.display.set_pixel(col, row, brightness)", "def importData( self, asset = '', searchAndReplace = ['',''] ):\n\t\tpickleData = pickle.load( open( self.dataPath.path, \"rb\" ) )\n\t\tlayers = [RenderLayerData(l,d) for l,d in pickleData.items() if not ':' in l]\n\t\tfor l in layers:\n\t\t\tif not searchAndReplace [0]== '' or not searchAndReplace[1] == '':\n\t\t\t\tl.filterMe( asset, searchAndReplace )\n\t\t\tl.create()\n\t\t\tl.addObjects()\n\t\t\tl.makeOverrides()\n\t\t\tl.makeOverrideConnections()\n\t\t\tl.makeShaderOverride()", "def filterRansac():\n pass", "def filter_nonhsa_targets(data_list):\n if FOLD_CHANGE in data_list[0].keys(): # check whether it is a FC or a FR dataset\n data_type = FOLD_CHANGE\n elif FOLD_REGULATION in data_list[0].keys():\n data_type = FOLD_REGULATION\n else:\n data_type = 'Results'\n return [{TEST_SAMPLE: data_elem[TEST_SAMPLE], CONTROL_SAMPLE: data_elem[CONTROL_SAMPLE],\n data_type: data_elem[data_type][data_elem[data_type][TARGET_NAME]\n .apply(lambda el: True if el.startswith('hsa-') else False)]}\n for data_elem in data_list]", "def list_reference_images_in_filter(conn,primary_ref,f,log):\n\n log.info('Identifying all current reference image in filter '+str(f))\n\n query = 'SELECT * FROM reference_images WHERE filter=\"'+str(primary_ref[f])+\\\n '\" AND software=\"'+str(primary_ref['software_id'])+\\\n '\" AND facility!=\"'+str(primary_ref['facility_id'])+'\"'\n\n ref_image_list = phot_db.query_to_astropy_table(conn, query, args=())\n\n log.info(repr(ref_image_list))\n\n return ref_image_list", "def filter_irrigated(asset, yr, region, filter_type='irrigated', addl_yr=None):\n filt_fc = None\n\n # filter out any weird geometries\n plots = ee.FeatureCollection(asset)\n plots = plots.map(lambda x: x.set('geo_type', x.geometry().type()))\n plots = plots.filter(ee.Filter.eq('geo_type', 'Polygon'))\n\n roi = ee.FeatureCollection(region)\n if filter_type == 'irrigated':\n\n summer_s, late_summer_e = 
'{}-05-01'.format(yr), '{}-07-15'.format(yr)\n late_summer_s_, summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.median(),\n scale=30.0)\n early_int_mean = early_int_mean.select('median')\n\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo # .filter(ee.Filter.Or(ee.Filter.gt('median', 0.9), ee.Filter.gt('mean', 0.8)))\n desc = '{}_{}_irr'.format(os.path.basename(region), yr)\n\n elif filter_type == 'dryland':\n\n summer_s, late_summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n late_summer_s_, late_summer_e_ = '{}-07-01'.format(addl_yr), '{}-10-31'.format(addl_yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n early_int_mean = early_int_mean.select(['mean', 'MGRS_TILE', 'system:index', 'popper'],\n ['nd_e', 'MGRS_TILE', 'system:index', 'popper'])\n\n lsSR_masked = landsat_masked(addl_yr, roi)\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, late_summer_e_).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo.filter(ee.Filter.Or(ee.Filter.lt('nd_e', 0.7), ee.Filter.lt('mean', 0.7)))\n desc = '{}_dry'.format(os.path.basename(region))\n\n else:\n raise NotImplementedError('must choose from filter_low or filter_high')\n\n task = ee.batch.Export.table.toCloudStorage(filt_fc,\n description=desc,\n bucket='wudr',\n fileFormat='SHP')\n print(yr, filter_type)\n task.start()", "def _filter(self, values, asset):\n log.debug(\"Testing trigger filters against asset %s\", asset['id'])\n for filter in self.filters:\n if not filter._apply(values, asset):\n return False\n return True", "def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)", "def filter(self, value, model=None, context=None):\n\n # string filter: skip non-strings\n if type(value) is not str:\n return value\n\n linker 
= Linker(**self.linkify_params)\n return linker.linkify(value)", "def filter(self, filter_dict):\n pass", "def load_scans_filter(img_org, filterdata):\n\n # check which filter will be used and apply that one\n filter = filterdata['filtername']\n if filter == 'gaussian':\n sigma = filterdata['parameters'][0]\n smoothed_img = calc_gaussian(img_org, sigma=sigma)\n elif filter == 'median':\n radius = filterdata['parameters'][0]\n smoothed_img = calc_median(img_org, radius=radius)\n elif filter == 'curvatureflow':\n iter = filterdata['parameters'][0]\n timestep = filterdata['parameters'][1]\n smoothed_img = calc_curvatureflow(img_org, iteration=iter, step=timestep)\n elif filter == 'anisodiff':\n iter = filterdata['parameters'][0]\n timestep = filterdata['parameters'][1]\n conductance = filterdata['parameters'][2]\n smoothed_img = calc_anisodiff(img_org, iteration=iter, step=timestep, conductance=conductance)\n else:\n print('The filtername does not exist.')\n\n return smoothed_img", "def filter(self, target_model):\n # return filter_dict_to_target_model(self._axl_data, target_model)\n super().__setattr__('_axl_data', filter_dict_to_target_model(self._axl_data, target_model))\n return self", "def get_links_filter(self, keyword, number_links):\r\n podcast_data = []\r\n\r\n for entry in self.rss[0].entries:\r\n if keyword in entry.title: \r\n try:\r\n podcast_data = [entry.published, entry.title, \r\n entry.enclosures[0]['href'], \r\n self.rss[0].feed.title\r\n ]\r\n except IOError as err:\r\n print err\r\n except UnicodeDecodeError as err:\r\n print err\r\n else:\r\n self.podcast_list.append(podcast_data)\r\n if number_links != 0:\r\n if len(self.podcast_list) == number_links: \r\n return None\r\n return None", "def _on_lowres_assets(self):\n\n scene_assets = artellapipe.AssetsMgr().get_scene_assets()\n if not scene_assets:\n return\n\n for scene_asset in scene_assets:\n scene_asset.switch_to_proxy()", "def test_filters_with_extra_extraction(self) -> None:\n\n # pylint: disable=too-many-locals\n\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n def add_named_library(in_dict: Dict[str, Any]) -> Dict[str, Any]:\n out_dict = deepdict(in_dict)\n out_dict[\"libraries\"].append({\n \"name\": \"abcdef\",\n \"milkyway techfile\": \"test/abcdef.tf\"\n })\n return out_dict\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, add_named_library)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n def filter_func(lib: hammer_tech.Library) -> bool:\n return lib.milkyway_techfile is not None\n\n def paths_func(lib: hammer_tech.Library) -> List[str]:\n assert lib.milkyway_techfile is not None\n return [lib.milkyway_techfile]\n\n def extraction_func(lib: hammer_tech.Library, paths: List[str]) -> List[str]:\n assert len(paths) == 1\n if lib.name is None:\n name = \"\"\n else:\n name = str(lib.name)\n return [json.dumps({\"path\": paths[0], \"name\": name}, cls=HammerJSONEncoder, indent=4)]\n\n def sort_func(lib: hammer_tech.Library):\n assert lib.milkyway_techfile is not None\n return lib.milkyway_techfile\n\n test_filter = LibraryFilter.new(\"metatest\", \"Test filter that extracts metadata\",\n is_file=True, filter_func=filter_func,\n paths_func=paths_func,\n extraction_func=extraction_func,\n sort_func=sort_func)\n\n database = hammer_config.HammerDatabase()\n 
tech.set_database(database)\n raw = tech.process_library_filter(pre_filts=[], filt=test_filter,\n must_exist=False,\n output_func=hammer_tech.HammerTechnologyUtils.to_plain_item)\n\n # Disable false positive from pylint\n outputs = list(map(lambda s: json.loads(s), raw)) # pylint: disable=unnecessary-lambda\n self.assertEqual(outputs,\n [\n {\"path\": tech.prepend_dir_path(\"test/abcdef.tf\"), \"name\": \"abcdef\"},\n {\"path\": tech.prepend_dir_path(\"test/coconut\"), \"name\": \"\"},\n {\"path\": tech.prepend_dir_path(\"test/soy\"), \"name\": \"\"}\n ])\n\n # Cleanup\n shutil.rmtree(tech_dir_base)", "def exportLights(self):\n\t\t#TODO! REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\t\tif len(finalLi) == 1:\n\t\t\t\tlitsToExport.append( finalLi[0] )\n\t\t\telse:\n\t\t\t\tlitsToExport.append( finalLi[1] )\n\t\tif litsToExport:\n\t\t\tmc.select( litsToExport, r=1, ne=1 )\n\t\t\tmc.file( self.lightPath.path, op=\"v=0\", typ=\"mayaAscii\", pr=1, es=1 )\n\t\t\t#export Light Linking\n\t\t\tself.exportLightLinking()", "def filter_data(data,filters):\n final_filter = pd.Series(np.array([True] * data.shape[0]))\n for attribute, value in filters:\n final_filter &= data[attribute] == value\n return data[final_filter]", "def filters(self):\n\t\treturn self.local_filter", "def filter_smooth(self, transect, setting):\n\n # Set property\n self.smooth_filter = setting\n\n # Compute ens_time\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n n_ensembles = len(ens_time)\n # Determine if smooth filter should be applied\n if self.smooth_filter == 'On':\n # Initialize arrays\n self.smooth_speed = repmat([np.nan], 1, n_ensembles)\n self.smooth_upper_limit = repmat([np.nan], 1, n_ensembles)\n self.smooth_lower_limit = repmat([np.nan], 1, n_ensembles)\n\n # Boat velocity components\n b_vele = np.copy(self.u_mps)\n b_veln = np.copy(self.v_mps)\n\n # Set filter parameters\n filter_width = 10\n half_width = 10\n multiplier = 9\n cycles = 3\n\n # Initialize variables\n bt_bad_idx = []\n upper_limit = 0\n lower_limit = 0\n\n # Compute speed and direction of boat\n direct, speed = cart2pol(b_vele, b_veln)\n\n # Compute residuals from a robust Loess smooth\n speed_smooth = rloess(ens_time, speed, filter_width)\n speed_res = speed - speed_smooth\n\n # Apply a trimmed standard deviation filter multiple times\n for i in range(cycles):\n filter_array = BoatData.run_std_trim(half_width, speed_res.T)\n\n # Compute filter bounds\n upper_limit = speed_smooth + multiplier * filter_array\n lower_limit = speed_smooth - multiplier * filter_array\n\n # Apply filter to residuals\n bt_bad_idx = np.where(np.logical_or(np.greater(speed, upper_limit), np.less(speed, lower_limit)))[0]\n speed_res[bt_bad_idx] = np.nan\n\n # Update valid_data property\n self.valid_data[4, :] = True\n self.valid_data[4, bt_bad_idx] = False\n self.valid_data[4, self.valid_data[1, :] == False] = True\n self.smooth_upper_limit = upper_limit\n self.smooth_lower_limit = lower_limit\n self.smooth_speed = speed_smooth\n\n else:\n\n # No filter applied all data assumed valid\n self.valid_data[4, :] = True\n self.smooth_upper_limit = np.nan\n self.smooth_lower_limit = np.nan\n self.smooth_speed = np.nan\n\n # Combine all filter data to composite valid data\n self.valid_data[0, :] = np.all(self.valid_data[1:, ], 0)\n 
self.num_invalid = np.sum(self.valid_data[0, :] == False, 0)", "def filter_images(data, vgid2idx, meta_vgids):\r\n new_data = []\r\n for vgid in meta_vgids:\r\n new_data.append(data[vgid2idx[vgid]])\r\n return new_data", "def filterWithSITK(self):\n #research\n profbox()\n backgroundNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\n backgroundNodeName = backgroundNode.GetName()\n backgroundImage = sitk.ReadImage( sitkUtils.GetSlicerITKReadWriteAddress( backgroundNodeName ) )\n filterImage = sitk.GradientMagnitudeRecursiveGaussian( backgroundImage, float(2) );\n del backgroundImage\n sitk.WriteImage( filterImage, sitkUtils.GetSlicerITKReadWriteAddress( backgroundNodeName ) )\n \n # notify\n backgroundNode.GetImageData().Modified()\n backgroundNode.Modified()", "def _filter_anchors(anchors):\n #TODO: Implement this function\n return anchors", "def test_brainvision_data_filters():\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_highpass_path,\n montage=montage, eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n\n assert_equal(raw.info['highpass'], 0.1)\n assert_equal(raw.info['lowpass'], 250.)", "def test_instrument_inventory_filtering():\n filt = 'GR150R'\n data = mm.instrument_inventory('niriss',\n add_filters={'filter': filt},\n return_data=True)\n\n filters = [row['filter'] for row in data['data']]\n\n assert all([i == filt for i in filters])", "def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def find_tfl_lights(image: np.ndarray):\n kernel = np.array(\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [1, 3, 1],\n [0, 1, 0]])\n\n kernel = kernel - kernel.mean()\n\n red_image = image.copy()\n red_image = red_image[:, :, 0]\n _, red_image = cv2.threshold(red_image, 200, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(red_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n red_points = np.where(mask)\n positions = []\n final_red_points = []\n for point1 in range(len(red_points[0])):\n point = (red_points[0][point1], red_points[1][point1])\n pixel = image[point[0], point[1]]\n if (pixel[1] < 170 or pixel[2] < 120) and pixel[0] >= 200:\n final_red_points.append(point)\n final_red_points = filter_points(final_red_points)\n positions += final_red_points\n auxilary = ['r'] * len(positions)\n red_x = [val[1] for val in final_red_points]\n red_y = [val[0] for val in final_red_points]\n green_image = image.copy()\n green_image = green_image[:, :, 1]\n _, green_image = cv2.threshold(green_image, 190, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(green_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n green_points = np.where(mask)\n final_green_points = []\n for point1 in range(len(green_points[0])):\n point = (green_points[0][point1], green_points[1][point1])\n pixel = image[point[0], point[1]]\n if pixel[0] <= 180 and pixel[1] >= 220 and pixel[2] >= 160:\n 
final_green_points.append(point)\n\n final_green_points = filter_points(final_green_points)\n positions += final_green_points\n auxilary += ['g'] * len(final_green_points)\n green_x = [val[1] for val in final_green_points]\n green_y = [val[0] for val in final_green_points]\n print(f\"There are {len(green_x) + len(red_x)} points\")\n return positions, auxilary", "def wiener_filter_data(self, data):\n # set up fields\n t = np.copy(data)\n t[self.mask.bad_pix] = hp.UNSEEN\n t[self.mask.good_pix] *= self.weights_map[self.mask.good_pix]\n tlm = hp.map2alm(t, self.params.lmax, iter=0)\n # get cooling schedule\n lamb_list = self.cs.lamb_list\n eps_list = self.cs.eps_list\n for i in range(len(lamb_list)):\n xlm, tlm = self.solve_flm(tlm, data, lamb_list[i], eps_list[i], wf = True)\n return xlm", "def filter(ctx: click.Context):\n vcf: Reader = vcfpy.Reader.from_path(ctx.obj[\"vcf_file\"])\n filter_settings: Dict[str, Dict] = SV_FILTER_SETTINGS[\"tiddit_tumor_normal\"]\n\n # Update VCF header\n vcf.header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AF_T_MAX\"),\n (\"Number\", \".\"),\n (\"Type\", \"Float\"),\n (\n \"Description\",\n \"Max AF in tumor, for rows with merged overlapping variants\",\n ),\n ]\n )\n )\n\n vcf.header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AF_N_MAX\"),\n (\"Number\", \".\"),\n (\"Type\", \"Float\"),\n (\n \"Description\",\n \"Max AF in normal, for rows with merged overlapping variants\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"normal_variant\"),\n (\"Description\", \"AF_T_MAX == 0 and ctg_t == False\"),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", f\"{filter_settings['max_normal_allele_frequency']['filter']}\"),\n (\n \"Description\",\n f\"AF_N_MAX > {filter_settings['max_normal_allele_frequency']['value']}\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", f\"{filter_settings['max_tin_fraction']['filter']}\"),\n (\n \"Description\",\n f\"(AF_N_MAX / AF_T_MAX) > {filter_settings['max_tin_fraction']['value']}\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"in_normal\"),\n (\"Description\", \"ctg_n == True and AF_N_MAX == 0 and AF_T_MAX <= 0.25\"),\n ]\n )\n )\n\n writer = vcfpy.Writer.from_path(\"/dev/stdout\", vcf.header)\n\n # Set soft filters for variants based on presence in the normal sample\n for variant in vcf:\n variant_info: dict = variant.INFO\n\n # Collect evidence of variant in tumor and normal sample\n evidence_dict: dict = get_tumor_normal_evidence(variant_info)\n allele_frequency_tumor: float = evidence_dict[\"tumor_max_af\"]\n allele_frequency_normal: float = evidence_dict[\"normal_max_af\"]\n tumor_has_contig: bool = evidence_dict[\"tumor_has_contig\"]\n normal_has_contig: bool = evidence_dict[\"normal_has_contig\"]\n\n # Add AF_MAX to info field\n variant.INFO[\"AF_T_MAX\"] = [round(allele_frequency_tumor, 4)]\n variant.INFO[\"AF_N_MAX\"] = [round(allele_frequency_normal, 4)]\n\n # Set filter statuses\n if allele_frequency_tumor == 0 and not tumor_has_contig:\n variant.add_filter(\"normal_variant\")\n writer.write_record(variant)\n continue\n\n # Regardless of CTG, set filter if AF_T / AF_N > max_tin_fraction\n normal_tumor_af_ratio = (\n float(allele_frequency_normal / allele_frequency_tumor)\n if allele_frequency_tumor > 0\n else 0\n )\n if normal_tumor_af_ratio > filter_settings[\"max_tin_fraction\"][\"value\"]:\n variant.add_filter(\"high_normal_af_fraction\")\n\n # Set 
filter if AF_N > 0.25\n if (\n allele_frequency_normal\n > filter_settings[\"max_normal_allele_frequency\"][\"value\"]\n ):\n variant.add_filter(\"high_normal_af\")\n\n # Set filter if CTG_N = True, AF_N is 0 and AF_T is below 0.25\n if (\n normal_has_contig\n and allele_frequency_normal == 0\n and allele_frequency_tumor <= 0.25\n ):\n variant.add_filter(\"in_normal\")\n\n writer.write_record(variant)", "def filtering(self):\r\n # 1 ###########################################################################################################\r\n fft_image = np.fft.fft2(self.image)\r\n # 2 ###########################################################################################################\r\n fft_shift_image = np.fft.fftshift(fft_image)\r\n\r\n ###\r\n mag_dft = np.log(np.abs(fft_shift_image))\r\n mag_dft = (255 * (mag_dft / np.max(mag_dft))).astype(dtype='uint8')\r\n ###\r\n\r\n # 3 ###########################################################################################################\r\n if self.filter_name == 'butterworth_l' or self.filter_name == 'butterworth_h':\r\n mask = self.filter(fft_shift_image.shape, self.cutoff, self.order)\r\n else:\r\n mask = self.filter(fft_shift_image.shape, self.cutoff)\r\n # 4 ###########################################################################################################\r\n # multiply the dft (fft shift image) by the mask\r\n filtered_image = fft_shift_image * mask\r\n\r\n ###\r\n mag_filtered_image = mag_dft * mask\r\n ###\r\n\r\n # 5 ###########################################################################################################\r\n inverse_fft_shift_image = np.fft.ifftshift(filtered_image)\r\n # 6 ###########################################################################################################\r\n inverse_fft_image = np.fft.ifft2(inverse_fft_shift_image)\r\n # 7 ###########################################################################################################\r\n mag_image = np.zeros(inverse_fft_image.shape, dtype=complex)\r\n for i in range(inverse_fft_image.shape[0]):\r\n for j in range(inverse_fft_image.shape[1]):\r\n if inverse_fft_image[i][j] < 0:\r\n mag_image[i][j] = -1 * inverse_fft_image[i][j]\r\n else:\r\n mag_image[i][j] = inverse_fft_image[i][j]\r\n # magnitude of inverse fft is complete\r\n # 8 ###########################################################################################################\r\n full_contrast_image = self.post_process_image(mag_image)\r\n\r\n return [mag_dft, mag_filtered_image, full_contrast_image]", "def filter(self):\n\t\tparameters = {}\n\n\t\tif self.keywords:\n\t\t\tparameters['track'] = ','.join(self.keywords)\n\n\t\tif self.locations:\n\t\t\tparameters['locations'] = ','.join([','.join([str(latlong) for latlong in loc]) for loc in self.locations])\n\n\t\tif self.usernames:\n\t\t\tparameters['follow'] = ','.join([str(u) for u in self.usernames])\n\n\t\tself.launch('statuses/filter.json', parameters)", "def specialSearch(searchType, start=0, length=20, assetType=None):\n url = \"%s/rest/assets/search/%s/%i/%i\" % (serverString, searchType, start, length)\n if assetType:\n url += \"/\"+assetType\n doc = minidom.parseString(urllib.urlopen(url).read().decode(\"utf-8\", \"ignore\").encode(\"ascii\", \"xmlcharrefreplace\"))\n if int(doc.getElementsByTagName(\"status\")[0].firstChild.data) != 1:\n raise ServerError(doc.getElementsByTagName(\"status\")[0].firstChild.data)\n assets = []\n for element in doc.getElementsByTagName(\"asset\"):\n assets += [Asset()]\n 
assets[-1]._getInfoFromNode(element)\n return assets", "def social_healthcare(osm_path): \n df_all = retrieve(osm_path,'multipolygons',['other_tags', 'amenity']).rename(columns={'other_tags': 'asset'}) \n \n #delete rows that are duplicates of social_amenity\n asset_list = ['hospital', 'doctors', 'clinic', 'dentist', 'pharmacy'] #note that this list of assets should be similar to assets extracted in def social_amenity\n for asset in asset_list:\n index_delete = df_all[(df_all['amenity'] == asset)].index\n df_all.drop(index_delete,inplace=True)\n df_all = df_all.drop(['amenity'], axis=1).reset_index(drop=True) #drop amenity column, reset index\n \n #get requested assets \n df = healthcare_filter(df_all)\n \n return df.reset_index(drop=True)", "def filter_img(img, new_img, f):\n\n datas = img.getdata()\n new_data = []\n for item in datas:\n if f(item[0]) and f(item[1]) and f(item[2]):\n new_data.append((0, 0, 0, 0))\n else:\n new_data.append(item)\n new_img.putdata(new_data)", "def with_lens_light(\r\n path_prefix: str,\r\n analysis: Union[al.AnalysisImaging, al.AnalysisInterferometer],\r\n setup_hyper: al.SetupHyper,\r\n source_results: af.ResultsCollection,\r\n light_results: af.ResultsCollection,\r\n mass: af.Model(al.mp.MassProfile) = af.Model(al.mp.EllIsothermal),\r\n smbh: af.Model(al.mp.MassProfile) = None,\r\n mass_centre: Optional[Tuple[float, float]] = None,\r\n end_with_hyper_extension: bool = False,\r\n unique_tag: Optional[str] = None,\r\n session: Optional[bool] = None,\r\n) -> af.ResultsCollection:\r\n\r\n \"\"\"\r\n __Model + Search + Analysis + Model-Fit (Search 1)__\r\n\r\n In search 1 of the MASS TOTAL PIPELINE we fit a lens model where:\r\n\r\n - The lens galaxy mass is modeled using a total mass distribution [Priors initialized from SOURCE PIPELINE].\r\n - The source galaxy's light is parametric or an inversion depending on the previous pipeline [Model and priors \r\n initialized from SOURCE PIPELINE].\r\n\r\n This search aims to accurately estimate the lens mass model, using the improved mass model priors and source model \r\n of the SOURCE PIPELINE\r\n \"\"\"\r\n mass = slam_util.mass__from_result(\r\n mass=mass, result=source_results.last, unfix_mass_centre=True\r\n )\r\n\r\n if mass_centre is not None:\r\n mass.centre = mass_centre\r\n\r\n if smbh is not None:\r\n smbh.centre = mass.centre\r\n\r\n source = slam_util.source__from_result_model_if_parametric(\r\n result=source_results.last, setup_hyper=setup_hyper\r\n )\r\n\r\n model = af.Collection(\r\n galaxies=af.Collection(\r\n lens=af.Model(\r\n al.Galaxy,\r\n redshift=light_results.last.instance.galaxies.lens.redshift,\r\n bulge=light_results.last.instance.galaxies.lens.bulge,\r\n disk=light_results.last.instance.galaxies.lens.disk,\r\n envelope=light_results.last.instance.galaxies.lens.envelope,\r\n mass=mass,\r\n shear=source_results.last.model.galaxies.lens.shear,\r\n smbh=smbh,\r\n hyper_galaxy=setup_hyper.hyper_galaxy_lens_from_result(\r\n result=light_results.last\r\n ),\r\n ),\r\n source=source,\r\n )\r\n )\r\n\r\n search = af.DynestyStatic(\r\n path_prefix=path_prefix,\r\n name=\"mass_total[1]_light[parametric]_mass[total]_source\",\r\n unique_tag=unique_tag,\r\n session=session,\r\n nlive=100,\r\n )\r\n\r\n result_1 = search.fit(model=model, analysis=analysis)\r\n\r\n \"\"\"\r\n __Hyper Extension__\r\n\r\n The above search may be extended with a hyper-search, if the SetupHyper has one or more of the following inputs:\r\n\r\n - The source is using an `Inversion`.\r\n - One or more `HyperGalaxy`'s are 
included.\r\n - The background sky is included via `hyper_image_sky` input.\r\n - The background noise is included via the `hyper_background_noise`.\r\n \"\"\"\r\n\r\n if end_with_hyper_extension:\r\n\r\n result_1 = extensions.hyper_fit(\r\n setup_hyper=setup_hyper,\r\n result=result_1,\r\n analysis=analysis,\r\n include_hyper_image_sky=True,\r\n )\r\n\r\n return af.ResultsCollection([result_1])", "def filter_detect(self, x):\n b, a = self.c_detect\n return filtfilt(b, a, x)", "def pass_filters(device):\n if opts.filter_on_group:\n if device.owningTeam not in opts.filter_on_group:\n return False\n if opts.filter_on_type:\n if device.deviceType not in opts.filter_on_type:\n return False\n\n return True", "def filter_masses_by_intensity(maximum_intensity_peak, peaks, lower_threshold, weak_percentage = False):\n\n maximum_intensity = maximum_intensity_peak.get_intensity()\n not_list = []\n weak_list = []\n strong_list = []\n\n for peak in peaks:\n done = False\n i_peak = peak.get_intensity()\n \n if i_peak < lower_threshold:\n\n not_list.append(peak)\n \n else:\n\n if weak_percentage != False:\n\n medium_threshold = maximum_intensity * weak_percentage / 100\n \n if i_peak < medium_threshold:\n\n weak_list.append(peak)\n \n else:\n\n strong_list.append(peak)\n \n else:\n\n strong_list.append(peak)\n \n return_list = [not_list, weak_list, strong_list]\n return(return_list)", "def filter_and_threshold(ctx: Context):\n if ctx.skin_color is None:\n return\n\n # Soften image\n cv2.GaussianBlur(ctx.image, (11, 11), 0, ctx.temp_image3)\n # Denoise\n cv2.medianBlur(ctx.temp_image3, 11, ctx.temp_image3)\n\n # Look for approximated skin color\n tolerance = (ctx.tolerance / 100) * ctx.skin_variance\n cv2.inRange(ctx.temp_image3, ctx.skin_color - tolerance, ctx.skin_color + tolerance, ctx.filter_image)\n\n cv2.morphologyEx(ctx.filter_image, cv2.MORPH_OPEN, None, ctx.filter_image)\n cv2.morphologyEx(ctx.filter_image, cv2.MORPH_CLOSE, None, ctx.filter_image)\n\n cv2.GaussianBlur(ctx.image, (3, 3), 0, ctx.filter_image)", "def _filter_return_url(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n if url in entry[\"request\"][\"url\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches", "def filter_mentor_advise(image):\n HSV = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n\n # For yellow\n yellow = cv2.inRange(HSV, (20, 100, 100), (50, 255, 255))\n\n # For white\n sensitivity_1 = 68\n white = cv2.inRange(HSV, (0,0,255-sensitivity_1), (255,20,255))\n\n sensitivity_2 = 60\n HSL = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n white_2 = cv2.inRange(HSL, (0,255-sensitivity_2,0), (255,255,sensitivity_2))\n white_3 = cv2.inRange(image, (200,200,200), (255,255,255))\n\n bit_layer = yellow | white | white_2 | white_3\n\n return bit_layer", "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def filterWithSITK(self):\r\n # research\r\n profbox()\r\n backgroundNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\r\n backgroundNodeName = backgroundNode.GetName()\r\n backgroundImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(backgroundNodeName))\r\n filterImage = sitk.GradientMagnitudeRecursiveGaussian(backgroundImage, float(2));\r\n del backgroundImage\r\n sitk.WriteImage(filterImage, sitkUtils.GetSlicerITKReadWriteAddress(backgroundNodeName))\r\n\r\n # notify\r\n 
backgroundNode.GetImageData().Modified()\r\n backgroundNode.Modified()", "def filter(self, regex_pattern: str) -> None:\n self._filter_attachment_list(regex_pattern)\n self._filter_url_list(regex_pattern)", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def prepare_filter_params(context, plan_name=None, **kw):\n from debra.models import Influencer\n from debra import logical_categories\n from django.core.cache import get_cache\n cache = get_cache('memcached')\n params = None #cache.get('filter_params')\n if not params:\n # influencers = Influencer.objects.filter(\n # show_on_search=True).exclude(blacklisted=True)\n # influencers = influencers.filter(\n # score_popularity_overall__isnull=False)\n # influencers = influencers.distinct()\n popularity = [\n {\n \"title\": \"Small\",\n },\n {\n \"title\": \"Medium\",\n },\n {\n \"title\": \"Large\",\n }\n ]\n engagement = [\n {\n \"title\": \"0-20\",\n },\n {\n \"title\": \"21-40\",\n },\n {\n \"title\": \"41-60\",\n },\n {\n \"title\": \"61-80\",\n },\n {\n \"title\": \"81+\",\n },\n ]\n\n price_ranges = [\n {\n \"title\": \"Cheap\",\n \"text\": \"Primarily In-expensive\"\n },\n # {\n # \"title\": \"Mid-level\",\n # },\n { \n \"title\": \"Expensive\",\n \"text\": \"Primarily High-end\"\n }\n ]\n\n genders = [\n {\n \"title\": \"Female\",\n },\n {\n \"title\": \"Male\",\n },\n ]\n\n social = [\n {\n \"value\": \"Facebook\",\n \"icon\": \"icon-social_facebook\"\n },\n {\n \"value\": \"Pinterest\",\n \"icon\": \"icon-social_pinterest2\"\n },\n {\n \"value\": \"Twitter\",\n \"icon\": \"icon-social_twitter\"\n },\n {\n \"value\": \"Instagram\",\n \"icon\": \"icon-social_instagram2\"\n },\n {\n \"value\": \"Youtube\",\n \"icon\": \"icon-social_youtube\"\n },\n ]\n\n age_groups = [\n {\n \"value\": \"0_19\",\n \"icon\": \"0 - 19\"\n },\n {\n \"value\": \"20_24\",\n \"icon\": \"20 - 24\"\n },\n {\n \"value\": \"25_29\",\n \"icon\": \"25 - 29\"\n },\n {\n \"value\": \"30_34\",\n \"icon\": \"30 - 34\"\n },\n {\n \"value\": \"35_39\",\n \"icon\": \"35 - 39\"\n },\n {\n \"value\": \"40\",\n \"icon\": \"40+\",\n }\n ]\n\n activity = [{\"value\": \"Blog\", \"icon\": \"icon icon-letter_quotes2\"}] + social\n\n categories = []\n\n brands = []\n\n locations = redis_cache.get('toplocs') or []\n # locations = Influencer.get_locations_list(num_results=200)\n # locations = Influencer.get_locations_list(num_results=None)\n\n tags = kw.get('tags', [])\n\n source = [{\"title\": \"Signup\", \"value\": \"blogger_signup\"}]\n\n params = {\n 'show_filters': True,\n 'popularity': list(popularity),\n 'engagement': list(engagement),\n 'categories': list(categories),\n 'brands': list(brands),\n 'priceranges': list(price_ranges),\n 'locations': list(locations),\n 'genders': list(genders),\n 'social': list(social),\n 'activity': list(activity),\n 'tags': list(tags),\n 'source': list(source),\n 'age_groups': list(age_groups),\n 'enabled_filters': [\n \"popularity\", \"engagement\", \"categories\", \"brands\",\n \"priceranges\", \"location\", \"genders\", \"socials\", \"activity\",\n \"tags\", \"likes\", \"shares\", \"comments\", \"source\", \"avgAge\",\n \"customCategories\", \"customOccupation\", \"customSex\", \"customEthnicity\",\n \"customTags\", \"customLanguage\", \"customAgeRange\",]\n }\n cache.set('filter_params', params)\n\n for loc in params.get('locations', []):\n loc['value'] = loc['title']\n\n if True: #settings.DEBUG:\n params['categories'] = [{\"title\": \"Fashion\", \"category\": \"fashion\"},\n {\"title\": \"Food\", \"category\": 
\"food\"},\n {\"title\": \"Kids\", \"category\": \"kids\"},\n {\"title\": \"Beauty\", \"category\": \"beauty\"},\n {\"title\": \"Travel\", \"category\": \"travel\"}]\n else:\n params['categories'] = []\n \n return params", "def filteringEngine(original, debug=False):\n\n processedImage1 = filterNotInRange(original, LABmin_healthy, LABmax_healthy, cv2.COLOR_BGR2LAB)\n processedImage2 = filterNotInRange(original, LABmin_terrain, LABmax_terrain, cv2.COLOR_BGR2LAB)\n # Image containing many FPs\n processedImage3 = filterNotInRange(original, HSVmin_yellow, HSVmax_yellow, cv2.COLOR_BGR2HSV)\n\n sum1 = cv2.add(processedImage1, processedImage2)\n sub1 = differentialNode(original, sum1)\n\n processedImage = filterNotInRange(sub1, LABmin, LABmax, cv2.COLOR_BGR2LAB)\n # sum2 = cv2.add(processedImage, processedImage3)\n\n kernel = np.ones((6, 6), np.uint8)\n temp = closing(processedImage, kernel)\n\n kernel = np.ones((3, 3), np.uint8)\n out = opening(temp, kernel)\n\n if debug:\n cv2.imshow('processedImage1', processedImage1)\n cv2.imshow('processedImage2', processedImage2)\n cv2.imshow('processedImage3', processedImage3)\n cv2.imshow('sum1', sum1)\n cv2.imshow('sub1', sub1)\n cv2.imshow('processedImage', processedImage)\n cv2.imshow('sum2', sum2)\n cv2.imshow('out', out)\n\n return out", "def filter_on_adwin_parameters(a_lt3,a_lt4,**kw):\r\n\r\n filter_params = kw.pop('adwin_filter_params',{})\r\n if len(filter_params):\r\n old_params = analysis_params.SPSP_fltr_adwin_settings\r\n \r\n for setup_key,setup_dict in filter_params.iteritems():\r\n for key,params in setup_dict.iteritems():\r\n analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+setup_key][key] = params\r\n\r\n fltr = np.array([True]*len(a_lt3.agrp['ssro_results'].value)) ### initially everything true\r\n\r\n for a,suffix in zip([a_lt3,a_lt4],['lt3','lt4']): ### loop over both files\r\n for key,val in analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+suffix].iteritems(): ### loop over the list of filter parameters\r\n [filter_on,minimum,maximum] = val\r\n\r\n if filter_on:\r\n if key == 'repetition_number':\r\n values = np.array([i for i in range(len(fltr)/a.g.attrs['sweep_length']) for _ in range(a.g.attrs['sweep_length'])]) ### Make an array of values corresponding to the current rep\r\n else:\r\n values = a.agrp[key].value\r\n\r\n fltr = np.logical_and(fltr,(values >= minimum) & ( values <= maximum)) ### update filter\r\n\r\n if len(filter_params):\r\n analysis_params.SPSP_fltr_adwin_settings = old_params\r\n\r\n return fltr", "def lightCurve(self, time, filters):\n\n lcMags = np.zeros(time.size, dtype=float)\n\n rise = np.where(time <= self.peakTime)\n lcMags[rise] += self.riseSlope*time[rise]-self.riseSlope*self.peakTime\n decline = np.where(time > self.peakTime)\n lcMags[decline] += self.declineSlope*(time[decline]-self.peakTime)\n\n for key in self.peaks.keys():\n fMatch = np.where(filters == key)\n lcMags[fMatch] += self.peaks[key]\n\n return lcMags", "def filter_dataset(source_path, dataset_path, progress_bar, info_label, progress, root):\n # dictionary to store two source path\n source_path_name = {}\n for d in SUB_DIRS:\n source_path_name[f\"{d}\"] = os.path.join(source_path, d)\n\n if not os.path.exists(source_path + \"/\" + SUB_DIRS[0]) and not os.path.exists(source_path + \"/\" + SUB_DIRS[1]):\n messagebox.showerror(\"Message\", \"Please check whether source directory, \\n \\\n must contain 'attentive' and 'not_attentive' dataset\")\n else:\n attentive = set()\n not_attentive = set()\n\n total_img = 
len(os.listdir(source_path + \"/\" + SUB_DIRS[0])) + len(os.listdir(source_path + \"/\" + SUB_DIRS[1]))\n i = 0\n\n # for attentive images in format particular format and availability of face\n for image in os.listdir(source_path + \"/\" + SUB_DIRS[0]):\n if len(image.split(\".\")) == 2 and image.split(\".\")[1] in IMG_FORMAT \\\n and check_availability(source_path + \"/\" + SUB_DIRS[0] + \"/\" + image):\n attentive.add(image)\n i += 1\n progress_bar['value'] = int((i / total_img) * 100)\n progress.update()\n\n info_label['text'] = 'Not Attentive set filtering is on progress'\n\n # for not attentive images\n for image in os.listdir(source_path + \"/\" + SUB_DIRS[1]):\n if len(image.split(\".\")) == 2 and image.split(\".\")[1] in IMG_FORMAT \\\n and check_availability(source_path + \"/\" + SUB_DIRS[1] + \"/\" + image):\n not_attentive.add(image)\n i += 1\n progress_bar['value'] = int((i / total_img) * 100)\n progress.update()\n\n info_label['text'] = 'Filtering is completed'\n progress.destroy()\n\n attentive, not_attentive = list(attentive), list(not_attentive)\n\n if len(attentive) > 200 and len(not_attentive) > 200:\n next_page_interface(source_path_name, dataset_path, attentive, not_attentive, root)\n else:\n messagebox.showerror(\"Message\", \"Valid Image Count Is Less Than 100\")", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def attribute_search(self, attribute, filters):\n for i in self.response_info['results']:\n if filters in i[attribute]:\n self.output.append(i)\n self.counter += 1", "def no_lens_light(\r\n path_prefix: str,\r\n analysis: Union[al.AnalysisImaging, al.AnalysisInterferometer],\r\n setup_hyper: al.SetupHyper,\r\n source_results: af.ResultsCollection,\r\n mass: af.Model(al.mp.MassProfile) = af.Model(al.mp.EllIsothermal),\r\n smbh: af.Model(al.mp.MassProfile) = None,\r\n mass_centre: Optional[Tuple[float, float]] = None,\r\n end_with_hyper_extension: bool = False,\r\n unique_tag: Optional[str] = None,\r\n session: Optional[bool] = None,\r\n) -> af.ResultsCollection:\r\n\r\n \"\"\"\r\n __Model + Search + Analysis + Model-Fit (Search 1)__\r\n\r\n In search 1 of the MASS TOTAL PIPELINE we fit a lens model where:\r\n\r\n - The lens galaxy mass is modeled using a total mass distribution [Priors initialized from SOURCE PIPELINE].\r\n - The source galaxy's light is parametric or an inversion depending on the previous pipeline [Model and priors \r\n initialized from SOURCE PIPELINE].\r\n\r\n This search aims to accurately estimate the lens mass model, using the improved mass model priors and source model \r\n of the SOURCE PIPELINE\r\n \"\"\"\r\n mass = slam_util.mass__from_result(\r\n mass=mass, result=source_results.last, unfix_mass_centre=True\r\n )\r\n\r\n if mass_centre is not None:\r\n mass.centre = mass_centre\r\n\r\n if smbh is not None:\r\n smbh.centre = mass.centre\r\n\r\n source = slam_util.source__from_result_model_if_parametric(\r\n result=source_results.last, setup_hyper=setup_hyper\r\n )\r\n\r\n model = af.Collection(\r\n galaxies=af.Collection(\r\n lens=af.Model(\r\n al.Galaxy,\r\n redshift=source_results.last.instance.galaxies.lens.redshift,\r\n mass=mass,\r\n smbh=smbh,\r\n shear=source_results.last.model.galaxies.lens.shear,\r\n ),\r\n source=source,\r\n )\r\n )\r\n\r\n search = af.DynestyStatic(\r\n path_prefix=path_prefix,\r\n name=\"mass_total[1]_mass[total]_source\",\r\n unique_tag=unique_tag,\r\n session=session,\r\n nlive=100,\r\n )\r\n\r\n result_1 = search.fit(model=model, analysis=analysis)\r\n\r\n \"\"\"\r\n __Hyper 
Extension__\r\n\r\n The above search may be extended with a hyper-search, if the SetupHyper has one or more of the following inputs:\r\n\r\n - The source is using an `Inversion`.\r\n - One or more `HyperGalaxy`'s are included.\r\n - The background sky is included via `hyper_image_sky` input.\r\n - The background noise is included via the `hyper_background_noise`.\r\n \"\"\"\r\n\r\n if end_with_hyper_extension:\r\n\r\n result_1 = extensions.hyper_fit(\r\n setup_hyper=setup_hyper,\r\n result=result_1,\r\n analysis=analysis,\r\n include_hyper_image_sky=True,\r\n )\r\n\r\n return af.ResultsCollection([result_1])", "def item_filter(item):\n\tcch_geoserver_services = get_only_cch_geoserver_services(item['services'])\n\thas_cch_geoserver_services = 0 != len(cch_geoserver_services)\n\tis_data = 'data' == item['itemType']\n\treturn is_data and has_cch_geoserver_services;", "def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)", "def _filter_universe_from_data_for_prediction(self, data, current_timestamp, universe):\n current_date = current_timestamp.date()\n assets = []\n for idx, row in universe.iterrows():\n if row.start_date <= current_date <= row.end_date:\n assets = row.assets\n break\n\n filtered = {}\n for feature, df in data.items():\n filtered[feature] = df.drop(df.columns.difference(assets), axis=1)\n\n return filtered", "def test_find_tfl_lights(image_path, json_path=None, fig_num=None):\n image = np.array(Image.open(image_path))\n\n if json_path is None:\n objects = None\n else:\n gt_data = json.load(open(json_path))\n what = ['traffic light']\n objects = [o for o in gt_data['objects'] if o['label'] in what]\n\n find_tfl_lights(image)", "def filterMe(self, asset = '', sAr = ['', ''] ):\n\t\tif self._objects:\n\t\t\tself._objects = [ mn.Node( o.name.replace( sAr[0], sAr[1] ) ) for o in self._objects ]\n\t\tif self._overrides:\n\t\t\tself._overrides = dict( [ (mn.Node( a.name.replace( sAr[0], sAr[1] )), self._overrides[a] ) for a in self._overrides.keys() ] )\n\t\tif self._overconns:\n\t\t\tself._overconns = dict( [(mn.Node(a.name.replace( sAr[0], sAr[1] )), mn.Node(self._overconns[a].name.replace( sAr[0], sAr[1] ))) for a in self._overconns.keys() ] )", "def use_effect(effect, photo_edit):\n if effect in FILTERS:\n photo = Image.open(photo_edit.upload)\n photo = photo.filter(FILTERS.get(effect))\n\n photo.save(photo_edit.upload.url[1:])", "def test_vs_filtering():\n vs = virtualscreening(n_cpu=-1)\n\n vs.load_ligands('sdf', os.path.join(test_data_dir, 'data/dude/xiap/actives_docked.sdf'))\n vs.apply_filter('ro5', soft_fail=1)\n assert_equal(len(list(vs.fetch())), 49)\n\n vs.load_ligands('sdf', os.path.join(test_data_dir, 'data/dude/xiap/actives_docked.sdf'))\n vs.apply_filter('ro3', soft_fail=2)\n assert_equal(len(list(vs.fetch())), 9)", "def _base_proxies_filter(self, category: str, filters: list) -> list:\n\n data_filtered = []\n \n if category == 'country':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=0, filters=filters)\n )\n \n elif category == 'anonymity':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=1, filters=filters)\n )\n\n elif category == 'protocol':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=2, filters=filters)\n )\n \n elif category == 'google_passed':\n 
data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=3, filters=filters)\n )\n\n return data_filtered", "def filter(self):\n new_nodes_to_update = {}\n nodes_to_update = {}\n\n for agent_id in self.cameras.keys():\n nodes_to_update[agent_id] = []\n new_nodes_to_update[agent_id] = []\n if agent_id not in self.beliefs:\n world_name = self.cameras[agent_id].name.replace(\"-\",\"_\")+\"_beliefs\"\n rospy.logdebug(\"[perspective_filter] create new world <%s>\" % str(world_name))\n self.beliefs[agent_id] = self.ctx.worlds[world_name]\n self.node_mapping[agent_id] = {}\n\n dq = deque()\n dq.append(self.source.scene.rootnode)\n\n while not rospy.is_shutdown() and 0 < len(dq):\n node = dq.pop()\n if node.id != self.source.scene.rootnode.id:\n # Process start here\n if node.id in self.cameras.keys(): # if the node is the agent POV\n nodes_to_update[node.id].append(node) # we add it to his belief\n\n if node.parent in self.cameras.keys() and node.type == MESH: # if the node is part of an agent\n nodes_to_update[node.parent].append(node) # we add it to his belief\n\n for agent_id, visible_nodes in self.visible_nodes.items(): # then we add the visible nodes\n if agent_id in self.cameras.keys():\n if node in visible_nodes:\n nodes_to_update[agent_id].append(node)\n\n # And end here\n for child_id in node.children:\n dq.append(self.source.scene.nodes[child_id])\n\n for agent_id, nodes in nodes_to_update.items():\n if nodes:\n for node in nodes:\n new_node = node.copy()\n if node.id in self.node_mapping[agent_id]:\n new_node.id = self.node_mapping[agent_id][node.id]\n if new_node.id in self.nodes_transform:\n if not numpy.allclose(self.nodes_transform[new_node.id], new_node.transformation):\n new_nodes_to_update[agent_id].append(new_node)\n self.nodes_transform[new_node.id] = new_node.transformation\n else:\n self.nodes_transform[new_node.id] = new_node.transformation\n new_nodes_to_update[agent_id].append(new_node)\n else:\n self.node_mapping[agent_id][node.id] = new_node.id\n new_nodes_to_update[agent_id].append(new_node)\n\n # Finally we update the corresponding beliefs worlds\n for agent_id, nodes in new_nodes_to_update.items():\n for node in nodes:\n node.parent = self.node_mapping[agent_id][node.parent] if node.parent in self.node_mapping[agent_id] \\\n else self.beliefs[agent_id].scene.rootnode.id\n if nodes:\n self.beliefs[agent_id].scene.nodes.update(nodes)", "def _hpfilter_one_return(series, lamb=1600, part=\"trend\"):\n hp_cycle, hp_trend = hpfilter(series, lamb)\n if part == \"cycle\":\n return hp_cycle\n else:\n return hp_trend", "def _filter_data(analyzed_tweet_data: list, start_date, end_date, hashtags, mentions, urls):\n # filter by dates\n filtered_data = get_tweets_in_daterange(\n analyzed_tweet_data, start_date, end_date)\n print(\"Done filtering on date...\")\n if hashtags:\n filtered_data = _filter_search_values(\n 'hashtags', hashtags, filtered_data)\n print(f'Done filtering on hashtags: {hashtags}')\n if mentions:\n filtered_data = _filter_search_values(\n 'mentions', mentions, filtered_data)\n print(f'Done filtering on mentions: {mentions}')\n if urls:\n filtered_data = _filter_search_values(\n 'tweet_urls', urls, filtered_data)\n print(f'Done filtering on urls: {urls}')\n\n return filtered_data", "def filter_images(data, split_data):\n all_split_ids = set()\n for split_name, ids in split_data.iteritems():\n all_split_ids.update(ids)\n new_data = []\n for img in data:\n keep = img['id'] in all_split_ids and len(img['regions']) > 0\n if keep:\n 
new_data.append(img)\n return new_data", "def filter_fusion(luma_bin, sat_bin, grad_bin, mentor_bin):\n binary = np.zeros_like(luma_bin)\n binary[ (((grad_bin==1) | (sat_bin==1)) & (luma_bin==1)) | (mentor_bin==1) ] = 1\n\n # Erosion and dilation - Seems doesn't work. Mask-off\n #kernel = np.ones((5,5))\n #binary_dilation = cv2.dilate(binary, kernel, iterations=1)\n #binary_erosion = cv2.erode(binary_dilation, kernel, iterations=1)\n #binary = binary_erosion\n\n return binary" ]
[ "0.5837777", "0.5805665", "0.56016874", "0.53269315", "0.53257084", "0.52495986", "0.52477026", "0.5235335", "0.51835656", "0.51263887", "0.5116933", "0.51134336", "0.50554407", "0.505449", "0.5049599", "0.5036466", "0.5035692", "0.50177574", "0.50123763", "0.49872875", "0.4979257", "0.4975004", "0.4927894", "0.4924816", "0.49214503", "0.49190053", "0.49179408", "0.49038744", "0.49023634", "0.48989546", "0.48938364", "0.48911357", "0.48873457", "0.48674646", "0.48631817", "0.48467785", "0.48370892", "0.483021", "0.48266652", "0.48107886", "0.480512", "0.47881848", "0.47876137", "0.47804815", "0.4772523", "0.4764213", "0.4763246", "0.47614592", "0.47529104", "0.4734397", "0.47152743", "0.47132623", "0.46957123", "0.46899074", "0.46810767", "0.46798065", "0.4677797", "0.46755832", "0.4668701", "0.466415", "0.4661341", "0.46572027", "0.46449718", "0.46410692", "0.46397218", "0.46296987", "0.46296656", "0.46280777", "0.46274158", "0.46247634", "0.46143046", "0.46089986", "0.46078083", "0.46049136", "0.46001297", "0.4591747", "0.4588471", "0.45867547", "0.45859492", "0.4581831", "0.45802468", "0.45755258", "0.45751256", "0.45613265", "0.4556455", "0.4550795", "0.4549819", "0.45493048", "0.45441708", "0.45434874", "0.4536257", "0.45352137", "0.45258224", "0.45255613", "0.45191473", "0.45174915", "0.45173806", "0.4512032", "0.45043987", "0.44960767" ]
0.7130668
0
import aovs into scene
def importAovs(self):
    LayersInfo = pickle.load( open( self.aovsPath.path, "rb") )
    mc.refresh( su = 1 )
    for ao in LayersInfo.keys():
        aov.create( ao, LayersInfo[ao]['name'], LayersInfo[ao]['type'], LayersInfo[ao]['enabled'] )
    mc.refresh( su = 0 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_scene(file_path):\n\n pass", "def importBaseScene(self):\n logger.debug(\"Func: importBaseScene\")\n relSceneFile = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"RelativePath\"]\n absSceneFile = os.path.join(self.projectDir, relSceneFile)\n if os.path.isfile(absSceneFile):\n # cmds.file(absSceneFile, i=True)\n nuke.nodePaste(absSceneFile)\n return 0\n else:\n msg = \"File in Scene Manager database doesnt exist\"\n self._exception(210, msg)\n return -1, msg", "def main():\r\n # create the EdenLudo sample\r\n EdenEvolves = EdenLudo()\r\n # ru-n the scene\r\n run()", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def importExternal(*args):\n goTo = pi.currentProject\n impFile = cmds.fileDialog2(fm=1, dir = goTo)[0]\n if impFile:\n cmds.file(impFile, i=True)", "def __init__(self, *args, **kwargs):\n super(MayaScene, self).__init__(*args, **kwargs)", "def set_up_scenes():\n cmd.zoom('Cathepsin', 10) # Zoom out to get a view on the whole complex\n cmd.scene('001', 'store', message='This is the first scene with a view on the complex!')\n cmd.set_view(closeup) # Get a close-up of the ligand by using the manually chosen viewpoint\n cmd.scene('002', 'store', message='This is the second scene with a close-up on the ligand!')", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def open_scene(file_path, save=True):\n\n pass", "def create_scene(self):\n \n self.scene=soya.World()", "def main():\n viewer = Viewer()\n\n # paramètre de transformation des paramètres\n #sol\n ground_size = 512\n ground_offset = 20\n\n #dinosaure\n characters_offset_x = 0\n characters_offset_y = -20\n characters_offset_z = 0\n characters_scale = 15\n characters_rotate_deg = 180\n\n #forêt\n forest_offset = -15\n forest_scale = 1.5\n\n #skybox\n Skysphere_scale = 3\n\n characters = Node(transform = translate(characters_offset_x, characters_offset_y, characters_offset_z) @ scale(characters_scale) @ rotate(axis=(0, 1, 0), angle = characters_rotate_deg))\n characters.add(*load_skinned(\"dino/Dinosaurus_roar.dae\"))\n\n forest = Node(transform = translate(0, forest_offset, 0) @ scale(forest_scale))\n forest.add(*load_textured(\"trees9/forest.obj\"))\n\n ground = Node(transform = translate(-ground_size>>1, ground_offset, -ground_size>>1))\n ground.add(sol(ground_size))\n\n Skysphere = Node(transform = scale(Skysphere_scale))\n Skysphere.add(*load_textured(\"Skysphere/skysphere.obj\"))\n\n scene = Node(transform = identity(), children = [characters, forest, ground, Skysphere])\n\n viewer.add(scene)\n\n viewer.run()", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = 
slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! 
Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: 
quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, 
corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def import_game_graph(self):\n self._import_win()\n self._import_loose()", "def main():\r\n \r\n world = WorldModel()\r\n #uncomment these lines and comment out the next 2 if you want to use the\r\n #full Baxter model\r\n #print \"Loading full Baxter model (be patient, this will take a minute)...\"\r\n #world.loadElement(os.path.join(model_dir,\"baxter.rob\"))\r\n print \"Loading simplified Baxter model...\"\r\n world.loadElement(os.path.join(model_dir,\"baxter_col.rob\"))\r\n print \"Loading Kiva pod model...\"\r\n world.loadElement(os.path.join(model_dir,\"kiva_pod/model.obj\"))\r\n print \"Loading plane model...\"\r\n world.loadElement(os.path.join(model_dir,\"plane.env\"))\r\n \r\n #shift the Baxter up a bit (95cm)\r\n Rbase,tbase = world.robot(0).getLink(0).getParentTransform()\r\n world.robot(0).getLink(0).setParentTransform(Rbase,(0,0,0.95))\r\n \r\n #translate pod to be in front of the robot, and rotate the pod by 90 degrees \r\n Trel = (so3.rotation((0,0,1),math.pi/2),[1.1,0,0])\r\n T = world.rigidObject(0).getTransform()\r\n world.rigidObject(0).setTransform(*se3.mul(Trel,T))\r\n \r\n #run the visualizer\r\n visualizer = MyGLViewer(world)\r\n visualizer.run()", "def chooseOpenFile(self):\n fname = QFileDialog.getOpenFileName(self, 'Open file',\n filter=\"Meshes (*.stl)\")\n if fname[0] == '':\n return\n name = fname[0][:-4].split('/')[-1]\n self.files[name] = AmpObject(fname[0], 'limb')\n amp = self.files[name]\n amp.addActor()\n amp.tform = vtk.vtkTransform()\n amp.tform.PostMultiply()\n amp.actor.SetUserTransform(amp.tform)\n# amp.centre()\n self.fileManager.addRow(name, amp)\n self.display()\n self.filesDrop.append(name)\n if hasattr(self, 'alCont'):\n self.alCont.getNames()\n if hasattr(self, 'regCont'):\n self.regCont.getNames()", "def do_poortego_import(self, arg, opt):\n poortego_import(self.my_interface, arg, opt)", "def __init__(self):\r\n self.label = \"OVL to Feature\"\r\n self.description = \"OVL to Feature converts an OVL file from CPOF, C2PC, GCCS or similar system and converts it to a series of Feature Class for Point, Line, and Polygons.\"\r\n self.canRunInBackground = False", "def reference_scene(file_path, **kwargs):\n\n pass", "def WriteImport(self, filename, logname, 
outputDir, settings, isAnimated, cameraRig, lightingRig):\r\n step = os.path.basename(outputDir)\r\n execution = os.path.basename(os.path.dirname(outputDir))\r\n test = os.path.basename(os.path.dirname(os.path.dirname(outputDir)))\r\n path = os.path.join(self.__scenesDir, test, execution, step)\r\n if (not os.path.isdir(path)):\r\n os.makedirs(path)\r\n self.__pathMap.append((path, outputDir))\r\n \r\n self.__logFiles.append(os.path.join(path, os.path.basename(logname)))\r\n self.__importLogFiles.append(self.__logFiles[-1])\r\n \r\n command = (\"SetValue \\\"preferences.scripting.cmdlogfilename\\\", \\\"\" + \r\n self.__logFiles[-1].replace(\"\\\\\", \"\\\\\\\\\") + \"\\\"\\n\"\r\n \"NewScene, false\\n\")\r\n if (FUtils.GetExtension(filename) == \"dae\"):\r\n command = (command + \r\n \"set myIProp = CreateImportFTKOptions()\\n\" +\r\n \"myIProp.Parameters(\\\"Filename\\\").Value = \\\"\" + \r\n filename.replace(\"\\\\\", \"\\\\\\\\\") +\"\\\"\\n\" +\r\n \"myIProp.Parameters(\\\"Verbose\\\").Value = True\\n\")\r\n for setting in settings:\r\n value = setting.GetValue().strip()\r\n if (value == \"\"):\r\n value = self.FindDefault(FXsi.__IMPORT_OPTIONS, \r\n setting.GetPrettyName())\r\n command = (command + \"myIProp.Parameters(\\\"\" + \r\n setting.GetCommand() + \"\\\").Value = \" + value + \"\\n\")\r\n command = command + \"ImportFTK myIProp.Name \\n\"\r\n elif (FUtils.GetExtension(filename) == \"scn\"):\r\n command = (command +\r\n \"OpenScene \\\"\" + filename.replace(\"\\\\\",\"\\\\\\\\\") + \"\\\"\\n\")\r\n else: \r\n return\r\n \r\n self.__currentImportProperName = FUtils.GetProperFilename(filename)\r\n basename = self.__currentImportProperName + \".scn\"\r\n\r\n# self.__script.write(\r\n# command +\r\n# \"SearchAndReplacePath \\\"All\\\", \\\"\" + FXsi.__REPLACE_PATH + \r\n# \"\\\", \\\"\" + \r\n# os.path.dirname(filename).replace(\"\\\\\", \"\\\\\\\\\") + \r\n# \"\\\", True\\n\" +\r\n# \"SaveSceneAs \\\"\" + \r\n# os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") +\r\n# \"\\\"\\n\"\r\n# )\r\n \r\n self.__script.write(\r\n command +\r\n \"SaveSceneAs \\\"\" + \r\n os.path.join(path, basename).replace(\"\\\\\", \"\\\\\\\\\") +\r\n \"\\\"\\n\"\r\n )\r\n \r\n self.__testCount = self.__testCount + 1\r\n \r\n return [basename,]", "def test_visuThreeD1(self):\n\n visu_logic = slicer.modules.visuThreeDWidget.logic\n #visu_logic.set_user_table(self.user_table)\n #visu_logic.set_user_file('/work/maria5/EBDS_CIVILITY/DataShare/TestMatricesForVisualization/AAL78/PerNodeMetrics/Conte_EigenVectorCentrality_4Yr_AAL78Regions.csv')\n #visu_logic.set_user_file('/Users/Wieke/Documents/visuThreeD/neo-0042-4year_AvgSym_normFull.csv')\n # visu_logic.create_node_actors()\n # visu_logic.create_line_actors()\n # visu_logic.update()\n #visu_logic.set_node_range()", "def import_(self, version):\n #nuke.nodePaste(version.absolute_full_path)\n return True", "def run():\n from cgl.plugins.blender.tasks.rig import parent_mdl_to_rig\n parent_mdl_to_rig()", "def visualize(self):\n app = QtGui.QApplication([''])\n SceneGUI(self)\n app.exec_()", "def connectMasterScene():\n try:\n nuke.toNode('Viewer1').setInput(0, nuke.toNode('MASTER_SCENE'))\n except:\n print 'no master scene found!'", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def main():\n obj = UnityFilesystem()\n obj.perform_module_operation()", "def AgiImport(dirpath, file):\n objPath=dirpath+'\\\\'+file\n if os.path.exists(objPath)==False:\n print objPath\n return\n \n ## Open 
new template file ##\n template = rs.TemplateFile()\n cmd=\"-_New \"\n cmd+=template+\" \"\n rs.Command(cmd)\n \n \n cmd=\"-_Import \"\n cmd+='\"'+os.path.abspath(objPath)+'\"'+\" \"\n cmd+=\"IgnoreTextures=No \"\n cmd+=\"MapOBJToRhinoZ=Yes \"\n cmd+=\"_Enter \"\n rs.Command(cmd)\n \n rs.Command(\"SplitDisjointMesh \")\n \n meshes = rs.LastCreatedObjects()\n max=0\n keep=None\n for guid in meshes:\n mesh = rs.coercemesh(guid)\n count = mesh.Faces.Count\n if count > max:\n keep = guid\n max = count\n \n if keep:\n meshes.remove(keep)\n rs.DeleteObjects(meshes)\n \n rs.ZoomExtents(all=True)\n \n cmd=\"-_SaveAs \"\n cmd+=\"SaveTextures=Yes \"\n cmd+='\"'+os.path.abspath(objPath).replace(\".wrl\",\".3dm\")+'\"'+\" \"\n cmd+=\"_Enter \"\n rs.Command(cmd)\n rs.DocumentModified(False)\n Rhino.RhinoApp.Wait()\n Rhino.RhinoApp.Wait()", "def exportAovs(self):\n\t\taovs = mc.ls( typ = 'aiAOV' )\n\t\taovData = {}\n\t\tfor a in aovs:\n\t\t\taovData[a] = {}\n\t\t\taovData[a]['enabled'] = mc.getAttr( a + '.enabled' )\n\t\t\taovData[a]['name'] = mc.getAttr( a + '.name' )\n\t\t\taovData[a]['type'] = mc.getAttr( a + '.type' )\n\t\tpickle.dump( aovData, open( self.aovsPath.path, \"wb\" ) )", "def run(args=None):\n parser = OptionParser(description='Shows how to use different IK solutions for arms with few joints.')\n OpenRAVEGlobalArguments.addOptions(parser)\n parser.add_option('--scene',action=\"store\",type='string',dest='scene',default='tridoftable.env.xml',\n help='Scene file to load (default=%default)')\n parser.add_option('--manipname',action=\"store\",type='string',dest='manipname',default=None,\n help='name of manipulator to use (default=%default)')\n (options, leftargs) = parser.parse_args(args=args)\n OpenRAVEGlobalArguments.parseAndCreateThreadedUser(options,main,defaultviewer=True)", "def __init__(self, scene: Scene):\n self.scene = scene", "def main():\n obj = VplexStorageview()\n obj.perform_module_operation()", "def script(self):", "def vesuvio_example():\n router = Router(topo_file=PROJECT_PATH + \"vtk/Vesuvio\")\n router.routing(32729, 31991)\n # write to vtk\n router.write2vtk(router.acqueduct)\n # render_vtk(\"vtk/Vesuvio\")", "def import_robot(rigs_dir):\n\n # If the scene is in IK mode, switch to FK before importing the robot\n current_tab = pm.tabLayout('switcher_tab_layout',\n query=True,\n selectTabIndex=True)\n if current_tab == 2:\n pm.tabLayout('switcher_tab_layout', edit=True, selectTabIndex=1)\n\n rigs = general_utils.get_rigs_dict()\n rig_names = general_utils.get_rigs_names(rigs)\n for rig_name in rig_names:\n try:\n if pm.optionMenu('robotImportList',\n query=True,\n value=True) == rig_name:\n try:\n rig_path = rigs[rig_name]\n pm.importFile(rig_path,\n defaultNamespace=True,\n returnNewNodes=True)\n except:\n pm.warning('Error Loading ' + rig_name)\n except:\n pm.warning('No robots found; check rig directory')", "def import_region(self, region_id, action=\"import\"):\n self.init_structures()\n con = SimConnection()\n con.connect(self.gridinfo._url)\n scenedata = con._con.ogrescene_list({\"RegionID\":region_id})\n for groupid, scenegroup in scenedata['res'].items():\n getattr(self, action+\"_group\")(groupid, scenegroup, 10)\n self.queueRedraw('VIEW3D')", "def import_model(command):\n namespace = app.main(command)\n assert namespace.command == 'im' or namespace.command == \"importmodel\"\n assert namespace.modelpath == \"test1\"\n assert namespace.convertpath == \"test2\"", "def load_velo(self):\n # Find all the Velodyne files\n velo_path = os.path.join(self.sequence_path, 
'velodyne', '*.bin')\n velo_files = sorted(glob.glob(velo_path))\n\n # Subselect the chosen range of frames, if any\n if self.frame_range:\n velo_files = [velo_files[i] for i in self.frame_range]\n\n print('Found ' + str(len(velo_files)) + ' Velodyne scans...')\n\n # Read the Velodyne scans. Each point is [x,y,z,reflectance]\n self.velo = utils.load_velo_scans(velo_files)\n\n print('done.')", "def __init__(self, scene): # type: (Scene) -> None\n self.scene = scene", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def main():\n\n viewer = Viewer(1900, 1200)\n viewer.add((init_BaracuddaFish()))\n viewer.add(init_BlueStarFish())\n viewer.add_movable(init_SeaSnake())\n init_groupeOfFishs(viewer)\n\n under_water = [\n 'res/skybox/underwater/uw_lf.jpg',\n 'res/skybox/underwater/uw_rt.jpg',\n 'res/skybox/underwater/uw_up.jpg',\n 'res/skybox/underwater/uw_dn.jpg',\n 'res/skybox/underwater/uw_ft.jpg',\n 'res/skybox/underwater/uw_bk.jpg']\n viewer.add(Skybox(under_water))\n\n viewer.run()", "def make_main(self):\n\t\tself.scene.camera = self.main_camera", "def run(self) -> None:\n self._hass.turn_on('scene.{0}'.format(self._args['scene']))", "def main(unused_argv):\n\n # Read the scene file.\n with open(FLAGS.scene_path, 'r') as file_id:\n scenes = json.load(file_id)\n\n # Read the synonyms file.\n with open(FLAGS.synonym_path, 'r') as file_id:\n synonyms = json.load(file_id)\n sorter = lambda x: len(x[0].split(' '))\n\n # Read the metainformation file.\n with open(FLAGS.metainfo_path, 'r') as file_id:\n gvars.METAINFO = json.load(file_id)\n tag_inv_map = {attr: tag for tag, attr in gvars.METAINFO['tag_map'].items()\n if tag != '<P>'}\n gvars.METAINFO['tag_inv_map'] = tag_inv_map\n gvars.METAINFO['synonym_keys'] = sorted(synonyms.items(),\n key=sorter, reverse=True)\n\n # Add ids to objects.\n scenes = utils.add_object_ids(scenes)\n scenes = utils.clean_object_attributes(scenes)\n\n # Read the caption templates.\n template_paths = os.listdir(FLAGS.caption_template_root)\n cap_templates = []\n for ii in template_paths:\n with open(os.path.join(FLAGS.caption_template_root, ii), 'r') as file_id:\n cur_templates = json.load(file_id)\n cap_templates.extend(cur_templates)\n #utils.pretty_print_templates(cap_templates, 1)\n\n # Read the question templates.\n template_paths = os.listdir(FLAGS.question_template_root)\n ques_templates = []\n for ii in template_paths:\n with open(os.path.join(FLAGS.question_template_root, ii), 'r') as file_id:\n cur_templates = json.load(file_id)\n ques_templates.extend(cur_templates)\n #utils.pretty_print_templates(ques_templates, 1)\n\n # 1. Check if there a scene_id_file specified.\n # 2. 
Check if num_images is -1\n if FLAGS.scene_id_file != '':\n with open(FLAGS.scene_id_file, 'r') as file_id:\n missing_ids = [int(ii.strip('\\n')) for ii in file_id.readlines()]\n print('Dialogs missing for scenes: %d' % len(missing_ids))\n\n # Create a image_index -> scenes list index dictionary\n image_list_id_dict = {ii['image_index']: index\n for index, ii in enumerate(scenes['scenes'])}\n scenes_subset = [scenes['scenes'][image_list_id_dict[scene_id]]\n for scene_id in missing_ids]\n\n elif FLAGS.num_images == -1:\n scenes_subset = scenes['scenes']\n\n else:\n scenes_subset = scenes['scenes'][0: FLAGS.num_images]\n\n # BFS for each scene.\n if FLAGS.num_workers == 1:\n # Single thread version.\n dialogs = []\n for index, scene in enumerate(scenes_subset):\n cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())\n print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' %\\\n (cur_time, 0, index, len(scenes_subset), scene['image_index']))\n gen_dialog = generate_dialog_bfs(scene, cap_templates, ques_templates)\n dialogs.append(gen_dialog)\n\n else:\n # Multithread version.\n output_q = multiprocessing.Queue()\n jobs = []\n for worker_id in range(FLAGS.num_workers):\n allotment = scenes_subset[worker_id::FLAGS.num_workers]\n inputs = (allotment, cap_templates, ques_templates)\n inputs += (worker_id, output_q)\n\n process = multiprocessing.Process(target=worker, args=inputs)\n jobs.append(process)\n process.start()\n\n # Wait for all the jobs to finish and collect the output.\n final_results = {}\n for _ in jobs:\n final_results.update(output_q.get())\n for job in jobs:\n job.join()\n\n # Flatten and sort.\n final_results = [jj for _, ii in final_results.items() for jj in ii]\n dialogs = sorted(final_results, key=lambda x: x['image_index'])\n # utils.pretty_print_dialogs(dialogs)\n\n # Save the dialogs.\n print('Saving dialog at: %s' % FLAGS.save_path)\n with open(FLAGS.save_path, 'w') as file_id:\n json.dump(dialogs, file_id)", "def launch(self):", "def importScript():\n crosswalkFile = 'mayaToNuke.info'\n if os.path.exists(crosswalkFile):\n fileInfo = open(crosswalkFile, 'r')\n text = fileInfo.readlines()\n dic = eval(text[-1])\n nkFile = dic.get('file')\n if os.path.exists(nkFile):\n print 'importing: '+nkFile\n nuke.nodePaste(nkFile)\n else:\n print 'nuke script not found...'", "def import_armature(self, n_armature):\n \n armature_name = self.nif_import.import_name(n_armature)\n b_armature_data = bpy.data.armatures.new(armature_name)\n b_armature_data.draw_type = 'STICK'\n b_armature_obj = create_b_obj(armature_name, b_armature_data)\n b_armature_obj.show_x_ray = True\n \n # make armature editable and create bones\n bpy.ops.object.mode_set(mode='EDIT',toggle=False)\n for n_child in n_armature.children:\n self.import_bone(n_child, b_armature_data, n_armature)\n self.fix_bone_lengths(b_armature_data)\n bpy.ops.object.mode_set(mode='OBJECT',toggle=False)\n if NifOp.props.animation:\n self.nif_import.animationhelper.create_action(b_armature_obj, armature_name+\"-Anim\")\n\n # The armature has been created in editmode,\n # now we are ready to set the bone keyframes.\n # if NifOp.props.animation:\n # self.nif_import.animationhelper.armature_animation.import_armature_animation(b_armature_obj)\n \n # constraints (priority)\n # must be done outside edit mode hence after calling\n for bone_name, b_posebone in b_armature_obj.pose.bones.items():\n n_block = self.nif_import.dict_blocks[bone_name]\n self.nif_import.animationhelper.armature_animation.import_bone_animation(n_block, 
b_armature_obj, bone_name)\n # find bone nif block\n if bone_name.startswith(\"InvMarker\"):\n bone_name = \"InvMarker\"\n # store bone priority, if applicable\n if n_block.name in self.nif_import.dict_bone_priorities:\n # TODO: Still use constraints to store priorities? Maybe use a property instead.\n constr = b_posebone.constraints.new('TRANSFORM')\n constr.name = \"priority:%i\" % self.nif_import.dict_bone_priorities[niBone.name]\n return b_armature_obj", "def zoo_import(name, head=''):\n net = gz.get_model(name, pretrained=True)\n export_block(head + name, net, preprocess=True)", "def __init__(self, wink, opp):\n super().__init__(wink, opp)\n opp.data[DOMAIN][\"entities\"][\"scene\"].append(self)", "def main():\n parser = cmdLineParse()\n inps = parser.parse_args()\n gf = asf.load_inventory(inps.inventory)\n\n if inps.template:\n print(f\"Reading from template file: {inps.template}...\")\n inputDict = dice.read_yaml_template(inps.template)\n else:\n inputDict = {\n \"topsinsar\": {\n \"sensorname\": \"SENTINEL1\",\n \"reference\": {\"safe\": \"\"},\n \"secondary\": {\"safe\": \"\"},\n }\n }\n\n intdir = \"int-{0}-{1}\".format(inps.reference, inps.secondary)\n if not os.path.isdir(intdir):\n os.mkdir(intdir)\n os.chdir(intdir)\n\n reference_urls = asf.get_slc_urls(gf, inps.reference, inps.path)\n secondary_urls = asf.get_slc_urls(gf, inps.secondary, inps.path)\n downloadList = reference_urls + secondary_urls\n inps.reference_scenes = [os.path.basename(x) for x in reference_urls]\n inps.secondary_scenes = [os.path.basename(x) for x in secondary_urls]\n\n if inps.poeorb:\n try:\n frame = os.path.basename(inps.reference_scenes[0])\n downloadList.append(asf.get_orbit_url(frame))\n frame = os.path.basename(inps.secondary_scenes[0])\n downloadList.append(asf.get_orbit_url(frame))\n except Exception as e:\n print(\"Trouble downloading POEORB... maybe scene is too recent?\")\n print(\"Falling back to using header orbits\")\n print(e)\n inps.poeorb = False\n pass\n\n # Update input dictionary with argparse inputs\n inputDict[\"topsinsar\"][\"reference\"][\"safe\"] = inps.reference_scenes\n inputDict[\"topsinsar\"][\"reference\"][\"output directory\"] = \"referencedir\"\n inputDict[\"topsinsar\"][\"secondary\"][\"safe\"] = inps.secondary_scenes\n inputDict[\"topsinsar\"][\"secondary\"][\"output directory\"] = \"secondarydir\"\n # Optional inputs\n # swaths, poeorb, dem, roi, gbox, alooks, rlooks, filtstrength\n if inps.swaths:\n inputDict[\"topsinsar\"][\"swaths\"] = inps.swaths\n if inps.dem:\n inputDict[\"topsinsar\"][\"demfilename\"] = inps.dem\n if inps.roi:\n inputDict[\"topsinsar\"][\"regionofinterest\"] = inps.roi\n if inps.gbox:\n inputDict[\"topsinsar\"][\"geocodeboundingbox\"] = inps.gbox\n if inps.filtstrength:\n inputDict[\"topsinsar\"][\"filterstrength\"] = inps.filtstrength\n if inps.alooks:\n inputDict[\"topsinsar\"][\"azimuthlooks\"] = inps.alooks\n if inps.rlooks:\n inputDict[\"topsinsar\"][\"rangelooks\"] = inps.rlooks\n print(inputDict)\n xml = dice.dict2xml(inputDict)\n dice.write_xml(xml)\n # Create a download file\n asf.write_download_urls(downloadList)\n print(f\"Generated download-links.txt and topsApp.xml in {intdir}\")", "def init_scene(self, a: AudioInitData = None) -> ActionStatus:\n\n if a is not None:\n o_id, o_commands = a.get_commands()\n self._object_init_commands[o_id] = o_commands\n return super().init_scene()", "def run_app(self):\r\n ## Tell the artist to be patient... 
eg not genY\r\n inprogressBar = pbui.ProgressBarUI(title = 'Rebuilding Surfacing Scene From Publish:')\r\n inprogressBar.show()\r\n inprogressBar.updateProgress(percent = 1, doingWhat = 'Processing scene info...')\r\n ## Instantiate the API\r\n tk = sgtk.sgtk_from_path(\"T:/software/bubblebathbay\")\r\n debug(app = self, method = 'run_app', message = 'API instanced...\\n%s' % tk, verbose = False)\r\n debug(app = self, method = 'run_app', message = 'Fetch Surface Shaders launched...', verbose = False)\r\n \r\n context = self.context ## To get the step\r\n debug(app = self, method = 'run_app', message = 'Context Step...\\n%s' % context.step['name'], verbose = False)\r\n if context.step['name'] != 'Surface':\r\n cmds.warning(\"Current context is not a valid Surfacing context. Please make sure you are under a valid shotgun Surfacing context!\")\r\n QtGui.QMessageBox.information(None, \"Current context is not a valid Surfacing context. Please make sure you are under a valid shotgun Surfacing context!\")\r\n raise tank.TankError(\"Current context is not a valid Surfacing context. Please make sure you are under a valid shotgun Surfacing context!\")\r\n \r\n scene_path = '%s' % os.path.abspath(cmds.file(query=True, sn= True))\r\n debug(app = self, method = 'run_app', message = 'scene_path... %s' % scene_path, verbose = False)\r\n \r\n ## Build an entity type to get some values from.\r\n entity = self.context.entity ## returns {'type': 'Shot', 'name': 'ep100_sh010', 'id': 1166}\r\n debug(app = self, method = 'run_app', message = 'entity... %s' % entity, verbose = False)\r\n \r\n ## Filter for the matching ID for the shot\r\n sg_filters = [[\"id\", \"is\", entity[\"id\"]]]\r\n debug(app = self, method = 'run_app', message = 'sg_filters... %s' % sg_filters, verbose = False)\r\n \r\n ## Build an entity type to get some values from.\r\n sg_entity_type = self.context.entity[\"type\"] ## returns Shot\r\n debug(app = self, method = 'run_app', message = 'sg_entity_type...\\n%s' % sg_entity_type, verbose = False)\r\n \r\n ## DATA\r\n ## NOTES SO HERE WE DON'T NEED TO CALL THE ASSETS FIELD FROM SHOTGUN\r\n ## WE CAN JUST GRAB THE LATEST PUBLISH FILE FROM EACH OF THE TEMPLATE STEPS\r\n inprogressBar.updateProgress(percent = 3, doingWhat = 'Processing scene info...')\r\n shadersTemplate = tk.templates[self.get_setting('maya_asset_SHD_XML_template')]\r\n debug(app = self, method = 'run_app', message = 'shadersTemplate...\\n%s' % shadersTemplate, verbose = False)\r\n\r\n ## PROCESS TEMPLATE NOW\r\n inprogressBar.updateProgress(percent = 5, doingWhat = 'Processing shaders xml...') \r\n debug(app = self, method = 'run_app', message = 'Processing template... 
%s' % shadersTemplate, verbose = False)\r\n ## SHADERS\r\n self.processTemplates(tk = tk, templateFile = shadersTemplate, id = entity[\"id\"], shotNum = entity[\"name\"], inprogressBar = inprogressBar, lighting = False)\r\n \r\n ############################################\r\n ## CORE ACHIVES \r\n ## Now process the assembly References\r\n debug(app = self, method = 'run_app', message = 'Processing mentalCore assemblies..', verbose = False)\r\n inprogressBar.updateProgress(percent = 50, doingWhat = 'Processing core archives...')\r\n if cmds.objExists('CORE_ARCHIVES_hrc') or cmds.objExists('CORE_ARCHIVES_hrc'):\r\n inprogressBar.updateProgress(percent = 100, doingWhat = 'Complete...')\r\n inprogressBar.close()\r\n inprogressBar = None\r\n else:\r\n ## Get the assembly paths from the transforms in the scene with the correct tags to load now..\r\n self.getAssemblyPaths = coreLib.getCorePaths()\r\n debug(app = self, method = 'run_app', message = 'self.getAssemblyPaths.. %s' % self.getAssemblyPaths, verbose = False)\r\n \r\n ## Now load the assemblies from the paths\r\n coreLib.loadCoreArchives(paths = self.getAssemblyPaths)\r\n debug(app = self, method = 'run_app', message = 'self.loadCoreArchives Successful all assemblies loaded moving on to reconnect now...', verbose = False)\r\n inprogressBar.updateProgress(percent = 70, doingWhat = 'Core archives loaded...')\r\n \r\n ## Now connect the assemblies.\r\n inprogressBar.updateProgress(percent = 80, doingWhat = 'Reconnecting core archives...')\r\n coreLib.doReconnect(postPublish = False)\r\n debug(app = self, method = 'run_app', message = 'Ahh core archive assemblies reconnected successfully!!...', verbose = False)\r\n \r\n ## Now cleanup\r\n inprogressBar.updateProgress(percent = 90, doingWhat = 'Cleaning...')\r\n ## Group the placements\r\n cleanup.shotCleanupPlacements() \r\n ## Group the lights\r\n cleanup.shotCleanupLights()\r\n ## Put all the coreRebuild under Lighting_hrc group\r\n coreLib._cleanupCoreArchiveRebuildGrps('LIGHTING_hrc')\r\n \r\n \r\n inprogressBar.updateProgress(percent = 100, doingWhat = 'COMPLETE...')\r\n inprogressBar.close()\r\n inprogressBar = None", "def new_scene(force=True, **kwargs):\n\n pass", "def make_simple_scenes(self):\n clown = Clown()\n clown.set_location( 0, 0, 0 )\n clown.set_size( 1, 1, 1 )\n self.objects.append( clown )\n\n clown1Scene = Scene3D()\n clown1Scene.add_object( clown )\n self.scenes.append( clown1Scene )\n\n head = Head()\n head.set_location( 0, 0, 0 )\n head.set_size( 1.5, 1.5, 1.5 )\n self.objects.append( head )\n\n headScene = Scene3D()\n headScene.add_object( head )\n self.scenes.append( headScene )\n\n hat = Hat()\n hat.set_location( 0, 0, 0 )\n hat.set_size( 1.5, 1.5, 1.5 )\n self.objects.append( hat )\n\n hat1Scene = Scene3D()\n hat1Scene.add_object( hat )\n self.scenes.append( hat1Scene )\n\n eye = Eye()\n eye.set_color(1, 0, 0)\n eye.set_location(0, 1, 1)\n eye.set_size(1.3, 1.3, 1.3)\n eye.set_rotate( 45, 1, 0, 0 )\n self.objects.append( eye )\n\n eye1Scene = Scene3D()\n eye1Scene.add_object( eye )\n self.scenes.append( eye1Scene )\n\n donut = Donut()\n donut.set_color(1, 0, 1 )\n donut.set_location( 0, 0, 0 )\n donut.set_size( 2.0, 2.0, 2.0 )\n donut.set_rotate( 45, 0, 1, 0)\n self.objects.append( donut )\n\n donut1Scene = Scene3D()\n donut1Scene.add_object( donut )\n self.scenes.append( donut1Scene )\n\n cone = Cone()\n cone.set_color( 1, 0, 1 )\n cone.set_location( 0, 0, 0 )\n cone.set_size( 2.0, 2.0, 2.0 )\n self.objects.append( cone )\n\n cone1Scene = Scene3D()\n 
cone1Scene.add_object( cone )\n self.scenes.append( cone1Scene )\n\n box1 = self.make_box(1, Color(1, 0, 1))\n self.objects.append( box1 )\n\n box1Scene = Scene3D()\n box1Scene.add_object( box1 )\n self.scenes.append( box1Scene )\n\n box2 = self.make_box( 1, Color(0, 1, 1 ))\n box2.set_rotate( 45, 0, 0, 1 )\n box2.set_size(2.0, 2.0, 2.0)\n self.objects.append( box2 )\n\n box2Scene = Scene3D()\n box2Scene.add_object( box2 )\n self.scenes.append( box2Scene )\n\n sp = self.make_ball(1, Color(0.8, 0.8, 0))\n sp.set_size(2.0, 2.0, 2.0)\n self.objects.append( sp )\n\n ballScene = Scene3D()\n ballScene.add_object( sp )\n self.scenes.append( ballScene )", "def _CMD_IMPORT(self, file_name):\n # reset inspector:\n # self.inspector = DataInspectorRecord()\n\n ext = file_name.split('.')[-1]\n if ext == 'mat':\n # self.model.from_json_dict(buff)\n self.model.from_mat_file(file_name)\n\n elif ext == 'json':\n buff = ''\n with open(file_name, 'rb') as f:\n buff = f.read()\n model = json.loads(buff)\n self.model.from_json_dict(model)\n\n else:\n raise DataExplorerError('Unsupported file format: {}'.format(ext))\n\n # update initial selection - first row:\n if len(self.model.data_list) > 0:\n self.handle_row_select([self.model.data_list[0]])", "def on_open_uv_editor():\n cmds.TextureViewWindow()", "def __init__(self, sw_path, json_path):\n Topo.__init__(self)\n\n # Directory where this file / script is located\"\n selfPath = os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe()))) # script directory\n\n # Initialize a service helper for Quagga with default options\n quaggaSvc = QuaggaService(autoStop=False)\n\n # Path configurations for mounts\n quaggaBaseConfigPath = selfPath + '/configs/'\n\n # List of Quagga host configs\n quaggaHosts = {}\n #quaggaHosts['r1'] = (QuaggaHost(name='r1', ip='172.0.1.1/16', loIP='10.0.1.1/24'))\n quaggaHosts['r2'] = QuaggaHost(name='r2', ip='172.0.2.1/16', loIP='10.0.2.1/24')\n quaggaHosts['r3'] = QuaggaHost(name='r3', ip='172.0.3.1/16', loIP='10.0.3.1/24')\n quaggaHosts['r4'] = QuaggaHost(name='r4', ip='172.0.4.1/16', loIP='10.0.4.1/24')\n quaggaHosts['r5'] = QuaggaHost(name='r5', ip='172.0.5.1/16', loIP='10.0.5.1/24')\n #quaggaHosts['r6'] = (QuaggaHost(name='r6', ip='172.0.6.1/16', loIP='10.0.6.1/24'))\n\n\n # Add the switch for the SWIFTED router\n p4_switch = self.addSwitch('s1', dpid='1', sw_path=sw_path, json_path=json_path, thrift_port=_THRIFT_BASE_PORT)\n\n\n # Setup each Quagga router, add a link between it and the IXP fabric\n for name, host in quaggaHosts.iteritems():\n\n # Create an instance of a host, called a quaggaContainer\n quaggaContainer = self.addHost(name=host.name,\n ip=host.ip,\n hostname=host.name,\n privateLogDir=True,\n privateRunDir=True,\n inMountNamespace=True,\n inPIDNamespace=True,\n inUTSNamespace=True)\n\n # Add a loopback interface with an IP in router's announced range\n self.addNodeLoopbackIntf(node=host.name, ip=host.loIP)\n\n # Configure and setup the Quagga service for this node\n quaggaSvcConfig = \\\n {'quaggaConfigPath': quaggaBaseConfigPath + host.name}\n self.addNodeService(node=host.name, service=quaggaSvc,\n nodeConfig=quaggaSvcConfig)\n\n r6 = self.addHost(name='r6',\n ip='172.0.6.1/16',\n hostname='r6',\n privateLogDir=True,\n privateRunDir=True,\n inMountNamespace=True,\n inPIDNamespace=True,\n inUTSNamespace=True)\n\n\n r1 = self.addHost(name='r1',\n ip='172.0.1.1/16',\n hostname='r1',\n privateLogDir=True,\n privateRunDir=True,\n inMountNamespace=True,\n inPIDNamespace=True,\n inUTSNamespace=True)\n\n # 
Attach the quaggaContainer to the IXP Fabric Switch\n self.addLink('r1', p4_switch, intfName1=\"s1\", intfName2='r1-p4switch')\n self.addLink('r2', p4_switch, intfName1=\"s1\", intfName2='r2-p4switch')\n self.addLink('r3', p4_switch, intfName1=\"s1\", intfName2='r3-p4switch')\n self.addLink('r4', p4_switch, intfName1=\"s1\", intfName2='r4-p4switch')", "def __init__(self, scene: Scene):\n super(SceneGUI, self).__init__()\n\n self.scene = scene # save instance of Scene class to this object\n if scene.photons.size == 0:\n raise(Exception, \"no data stored in scene\")\n\n # QImage require data to be 32 bit aligned. Thus, we need to make sure out_size is even\n out_size = (round(scene.n_rows * 150/scene.n_cols)*2, 300)\n self.image = imresize(scene.srgb, out_size, interp='nearest')\n\n # set status bar\n self.statusBar().showMessage(\"Ready\")\n\n # set menu bar\n menu_bar = self.menuBar()\n menu_file = menu_bar.addMenu(\"&File\")\n menu_plot = menu_bar.addMenu(\"&Plot\")\n\n # add load scene to file menu\n load_scene = QtGui.QAction(\"Load Scene\", self)\n load_scene.setStatusTip(\"Load scene from file\")\n load_scene.triggered.connect(self.menu_load_scene)\n menu_file.addAction(load_scene)\n\n # add save scene to file menu\n save_scene = QtGui.QAction(\"Save Scene\", self)\n save_scene.setStatusTip(\"Save scene to file\")\n save_scene.setShortcut(\"Ctrl+S\")\n save_scene.triggered.connect(self.menu_save_scene)\n menu_file.addAction(save_scene)\n\n # add illuminant energy to plot menu\n plot_il_energy = QtGui.QAction(\"Illuminant (Energy)\", self)\n plot_il_energy.setStatusTip(\"Plot spectra power distribution of scene illuminant\")\n plot_il_energy.triggered.connect(lambda: self.scene.plot(\"illuminant energy\"))\n menu_plot.addAction(plot_il_energy)\n\n # add illuminant photons to plot menu\n plot_il_quanta = QtGui.QAction(\"Illuminant (Photons)\", self)\n plot_il_quanta.setStatusTip(\"Plot spectra power distribution of scene illuminant\")\n plot_il_quanta.triggered.connect(lambda: self.scene.plot(\"illuminant photons\"))\n menu_plot.addAction(plot_il_quanta)\n\n # set up left panel\n left_panel = self.init_image_panel()\n\n # set up right panel\n right_panel = self.init_control_panel()\n\n splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)\n splitter.addWidget(left_panel)\n splitter.addWidget(right_panel)\n\n QtGui.QApplication.setStyle(QtGui.QStyleFactory().create('Cleanlooks'))\n\n widget = QtGui.QWidget()\n hbox = QtGui.QHBoxLayout(widget)\n hbox.addWidget(splitter)\n\n self.setCentralWidget(widget)\n\n # set size and put window to center of the screen\n self.resize(600, 400)\n qr = self.frameGeometry()\n qr.moveCenter(QtGui.QDesktopWidget().availableGeometry().center())\n self.move(qr.topLeft())\n\n # set title and show\n self.setWindowTitle(\"Scene GUI: \" + scene.name)\n self.show()", "def importer():\n pass", "def populateSceneRefs(*args):\n pi.referenceDictionary = {}\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, ra=True)\n\n #get reference paths\n refs = cmds.file(q=True, r=True)\n\n buff = []\n # loaded = []\n for ref in refs:\n #get the associated namespace\n ns = cmds.file(ref, q=True, ns=True)\n pi.referenceDictionary[ns] = ref\n\n # put files in buffer list to sort\n for g in pi.referenceDictionary.keys():\n buff.append(g)\n buff.sort()\n\n # now put the sorted namespaces in the list\n for b in buff:\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, append=b, dcc = selectRefs)\n\n # if ref is deferred(not loaded), change it's font\n for ref in refs:\n if 
cmds.file(ref, q=True, deferReference=True):\n ns = cmds.file(ref, q=True, ns=True) # get the namespace in order to get the item name\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, selectItem=ns) # sel the item in order to query it\n index = cmds.textScrollList(widgets[\"shotAssListTSL\"], q=True, selectIndexedItem=True)[0] # query the index of sel\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, lineFont = [index, \"obliqueLabelFont\"])\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, deselectAll=True)\n\n # if we're in a lgt file, look through current refs and for each one of type \"anm\", check the frame rates, etc. and give option to change\n curr = paths.PathManager(cmds.file(q=True, sn=True))\n if curr.shotType == \"lgt\":\n for ref in refs:\n p=paths.PathManager(ref)\n if p.shotType == \"anm\":\n dict = cFuncs.getFileFrameInfo(cFuncs.fixPath(ref))\n csi.compareSceneInfo(dict)", "def main(args):\n root = tkinter.Tk()\n root.title(\"OGRE Editor\")\n app = App(root)\n for path in args:\n app.load_signal_file(path)\n root.mainloop()", "def initialize(self, num_agents: int) -> None:\n object_loc_main = config[\"base\"][\"object_location\"]\n if experiment == \"stage2.0\":\n self.objects.add_object(file = \"experiments/flocking/images/redd.png\", pos = object_loc_main, scale = [800, 800], obj_type = \"obstacle\")\n object_loc = config[\"first_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n object_loc = config[\"second_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n elif experiment == \"stage1\":\n self.objects.add_object(file=\"experiments/flocking/images/redd.png\", pos=object_loc_main, scale=[800, 800],\n obj_type=\"obstacle\")\n object_loc = config[\"center_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n elif experiment == \"stage2.1\":\n self.objects.add_object(file=\"experiments/flocking/images/redd.png\", pos=object_loc_main, scale=[800, 800],\n obj_type=\"obstacle\")\n object_loc = config[\"first_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n object_loc = config[\"second_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc2.png\", pos=object_loc, scale=[225, 225], obj_type=\"site\"\n )\n elif experiment == \"stage3\":\n self.objects.add_object(file=\"experiments/flocking/images/redd.png\", pos=object_loc_main, scale=[1000, 1000],\n obj_type=\"obstacle\")\n object_loc = config[\"first_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n object_loc = config[\"second_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n object_loc = config[\"upper_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n object_loc = config[\"lower_circle\"][\"object_location\"]\n 
self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n elif experiment == \"stage3.1\":\n self.objects.add_object(file=\"experiments/flocking/images/redd.png\", pos=object_loc_main, scale=[800, 800],\n obj_type=\"obstacle\")\n object_loc = config[\"first_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n object_loc = config[\"second_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc1.png\", pos=object_loc, scale=[200, 200], obj_type=\"site\"\n )\n object_loc = config[\"upper_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc2.png\", pos=object_loc, scale=[225, 225], obj_type=\"site\"\n )\n object_loc = config[\"lower_circle\"][\"object_location\"]\n self.objects.add_object(\n file=\"experiments/aggregation/images/greyc2.png\", pos=object_loc, scale=[225, 225], obj_type=\"site\")\n\n\n min_x, max_x = area(object_loc_main[0], 1000)\n min_y, max_y = area(object_loc_main[1], 1000)\n\n # add agents to the environment\n for index, agent in enumerate(range(num_agents)):\n coordinates = generate_coordinates(self.screen)\n while (\n coordinates[0] >= max_x\n or coordinates[0] <= min_x\n or coordinates[1] >= max_y\n or coordinates[1] <= min_y\n ):\n coordinates = generate_coordinates(self.screen)\n\n self.add_agent(Cockroach(pos=np.array(coordinates), v=None, cockroach=self, index=index))", "def _setup_scene(self):\n\n scene = bpy.context.scene\n\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n # remove non mesh objects\n for obj in scene.objects:\n obj.select = (obj.type != \"MESH\")\n bpy.ops.object.delete()\n\n # empty sequences are false by default\n if scene.objects:\n\n # unlink objects (all meshes) from parents\n bpy.ops.object.select_all()\n bpy.ops.object.parent_clear(type=\"CLEAR_KEEP_TRANSFORM\")\n\n # join all meshes in one single object\n scene.objects.active = bpy.data.objects[0]\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)\n bpy.context.object.name = \"Object\"\n bpy.context.object.dimensions = bpy.context.object.dimensions / max(bpy.context.object.dimensions)\n\n # set the origin of the object to the cursor location\n scene.cursor_location = [0, 0, 0]\n bpy.ops.object.origin_set(type=\"ORIGIN_CURSOR\")\n # bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\", center=\"BOUNDS\")\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n\n if self.add_ground_plane:\n bpy.ops.mesh.primitive_plane_add(radius=10.)\n\n bpy.ops.object.select_all(action=\"DESELECT\")", "def main():\n run_it = scene.Control()\n state_dict = {\"TITLE\" : title.Title(),\n \"INTRO\" : cutscene.Cutscene0(),\n \"GAMEPLAY\" : gameplay.gamePlay(),\n \"ENDING\" : cutscene.Cutscene1()\n }\n run_it.setup_states(state_dict, \"TITLE\")\n run_it.main()", "def loadMultiple(method, *args):\n\n ### Declaring attributes\n selectedCurve = selectedMesh = None\n minRangeX = minRangeY = minRangeZ = maxRangeX = maxRangeY = maxRangeZ = 0\n selectedObjects = []\n\n ### Query UI values\n # Choise between standin / assembly\n selectedRadio = cmds.radioCollection(loadMethodRadio, query=True, select=True)\n # List of all asset icons on UI\n objectIconsList = cmds.layout(objectScroll, query=True, childArray=True)\n # Amount of copies\n buildingAmount = 
cmds.intSliderGrp(SpawnObjectsTab.BuildingAmount, query=True, value=True)\n # Deviation from original rotation\n rotationVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomRotation, query=True, value=True)\n # Deviation from original scale\n scaleVariation = cmds.floatSliderGrp(SpawnObjectsTab.RandomScale, query=True, value=True)\n\n ### Iterate over each asset icon\n for obj in objectIconsList:\n\n # Append to list if the asset is selected\n isSelected = cmds.iconTextCheckBox(obj, query=True, value=True)\n\n if isSelected:\n selectedObjects.append(cmds.iconTextCheckBox(obj, query=True, label=True))\n\n # Exit the function if no asset is selected\n if not selectedObjects:\n return\n \n # Reference to the function that will scatter the copies\n scatteringFunction = None\n\n ### The user chose \"curve\"\n if method == \"curve\":\n \n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnCurve\n\n # Get curve reference\n selectedCurve = cmds.ls(selection=True)\n if not selectedCurve:\n return\n selectedCurve = selectedCurve[0]\n\n ### The user chose \"range\"\n if method == \"range\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnRange\n\n # Query minimum values from floatField\n minValues = cmds.floatFieldGrp(SpawnObjectsTab.MinimumField, query=True, value=True)\n minRangeX, minRangeY, minRangeZ = minValues[0], minValues[1], minValues[2]\n # Query maximum values from floatField\n maxValues = cmds.floatFieldGrp(SpawnObjectsTab.MaximumField, query=True, value=True)\n maxRangeX, maxRangeY, maxRangeZ = maxValues[0], maxValues[1], maxValues[2]\n\n ### The user chose \"mesh\"\n if method == \"mesh\":\n\n # Set function from ObjectScattering.py\n scatteringFunction = ObjScatter.scatterOnMesh\n\n # Get reference of selected object\n selectedMesh = cmds.ls(selection=True)\n if not selectedMesh:\n return\n selectedMesh = selectedMesh[0]\n\n # Create group for the spawned copies \n finalGroup = cmds.group(name=\"CurveAssetGroup\", empty=True)\n cmds.select(clear=True)\n\n ### Iterate over the generated positions of the function with given parameters\n # scatteringFunction is a reference to a function in ObjectScattering.py\n # these functions are generators, they yield a value and we can iterate\n # to get the next value generated.\n for position in scatteringFunction(objectCount=buildingAmount, curve=selectedCurve,\n minX=minRangeX, minY=minRangeY, minZ=minRangeZ, maxX=maxRangeX, maxY=maxRangeY, maxZ=maxRangeZ,\n mesh=selectedMesh):\n \n # Randomly instance an asset from the selectedObjects list\n asset = AssetIcon(random.choice(selectedObjects))\n loadedAssetNode = None\n\n # Create copy based on the mode selected by the user\n if \"standin\" in selectedRadio:\n loadedAssetNode = asset.loadArnoldAsset()\n else: \n loadedAssetNode = asset.loadAsset()\n\n # Move this copy to the generated position\n cmds.move(position[0], position[1], position[2], loadedAssetNode, absolute=True)\n\n # If there is a fourth index on the position, that means we have rotation info\n # use that info to rotate the asset.\n # It is used to match an objects rotation to a face normal.\n if len(position) == 4:\n cmds.rotate(position[3][0], position[3][1], position[3][2], loadedAssetNode, absolute=True)\n \n # Add random rotation\n angle = random.uniform(-rotationVariation, rotationVariation)\n cmds.rotate(angle, loadedAssetNode, y=True, relative=True, objectSpace=True)\n\n # Add random scale\n newScale = random.uniform(1, 1+scaleVariation)\n 
cmds.scale(newScale, newScale, newScale, loadedAssetNode, absolute=True)\n\n #cmds.FreezeTransformations(loadedAssetNode)\n\n # Parent copy to group\n cmds.parent(loadedAssetNode, finalGroup)", "def handle(self, *args, **options):\n self.import_disciplines()", "def importAll(self, imdata = True, imlights = True, imaovs = True, imshaders = True, immaster = True, asset = '', searchAndReplace = ['',''] ):\n\t\tif immaster:\n\t\t\tself.importMasterSettings()\n\t\tif imlights and self.lightPath.exists:\n\t\t\tself.importLights( asset, searchAndReplace )\n\t\tif imaovs and self.aovsPath.exists:\n\t\t\tself.importAovs()\n\t\tif imshaders and self.shaderPath.exists:\n\t\t\tself.importShaders()\n\t\tif imdata and self.dataPath.exists:\n\t\t\tself.importData( asset, searchAndReplace )", "def main():\n get_engine(onnx_file_path, engine_file_path)", "def assign_aov(self, aov: AOV):\n\t\tif aov not in self.assigned_aovs:\n\t\t\tfor material in self.materials:\n\t\t\t\tshader_node_tree = material.node_tree\n\t\t\t\tassert shader_node_tree is not None, \"Material must have a node tree\"\n\t\t\t\taov.add_to_shader(shader_node_tree)\n\n\t\tself.assigned_aovs.append(aov)", "def import_workspace( ws , objects):\n\n if not isinstance( objects, list ):\n objects = [objects,]\n\n ## NOTE getattr is needed to escape python keyword import\n for o in objects:\n getattr( ws, \"import\") ( o )", "def import_workspace( ws , objects):\n\n if not isinstance( objects, list ):\n objects = [objects,]\n\n ## NOTE getattr is needed to escape python keyword import\n for o in objects:\n getattr( ws, \"import\") ( o )", "def _connectWakeAndFoamToOcean(self, tk, templateFile = '', id = '', shotNum = '', inprogressBar = ''):\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'Connecting fluid textures to ocean shader....', verbose = False)\r\n\r\n\t\t####################################################\r\n\t\t## Straight up connection if no interactive is found.\r\n\t\tif cmds.objExists(CONST.OCEANDISPSHADER) and cmds.objExists(CONST.WAKE_FLUID_SHAPENODE) and cmds.objExists(CONST.FOAM_FLUID_SHAPENODE):\r\n\t\t\ttry:\r\n\t\t\t\tcmds.connectAttr(\"%s.outAlpha\" % CONST.WAKE_FLUID_SHAPENODE, \"%s.waveHeightOffset\" % CONST.OCEANDISPSHADER, force = True)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\t\t\ttry:\r\n\t\t\t\tcmds.connectAttr(\"%s.outAlpha\" % CONST.FOAM_FLUID_SHAPENODE, \"%s.foamOffset\" % CONST.OCEANDISPSHADER, force = True)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\t###########################################\r\n\t\t####### INTERACTIVE STUFFF ################\r\n\t\t### Now check for interactive caches and blend these\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'Looking for interactive anim fluids now...', verbose = False)\r\n\t\tinprogressBar.updateProgress(percent = 76, doingWhat = 'Looking for interactive caches..')\r\n\r\n\t\tgetAnimVersionFolders = tk.paths_from_template(templateFile, {'Step' : 'Anm', 'id' : id, 'Shot' : shotNum})\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'getAnimVersionFolders: %s' % getAnimVersionFolders, verbose = False)\r\n\r\n\t\t## now find the highest version folder number\r\n\t\thighestVersionFolder = r'%s' % max(getAnimVersionFolders)\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'highestVersionFolder...%s' % highestVersionFolder, verbose = False)\r\n\r\n\t\thighestVersionFolder = highestVersionFolder.replace('\\\\', '/')\r\n\t\tdebug(app = self.app, method = 
'_connectWakeAndFoamToOcean', message = 'highestVersionFolder replaced \\\\ with /...\\n%s' % highestVersionFolder, verbose = False)\r\n\r\n\t\tversionNumber = highestVersionFolder.split('/')[-1]\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'versionNumber: %s' % versionNumber, verbose = False)\r\n\r\n\t\tlistCacheFiles = os.listdir(highestVersionFolder)\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'listCacheFiles...\\n%s' % listCacheFiles, verbose = False)\r\n\r\n\t\tinteractiveCaches = {}\r\n\t\tfor each in listCacheFiles:\r\n\t\t\tif each.endswith('.xml'): ## the ocean shader preset saved out\r\n\t\t\t\tif CONST.WAKE_FLUID_SHAPENODE in each:\r\n\t\t\t\t\tinteractiveCaches[CONST.WAKE_FLUID_SHAPENODE] = '%s/%s' % (highestVersionFolder, each)\r\n\t\t\t\telse:\r\n\t\t\t\t\tinteractiveCaches[CONST.FOAM_FLUID_SHAPENODE] = '%s/%s' % (highestVersionFolder, each)\r\n\r\n\t\tif interactiveCaches:\r\n\t\t\tfluidCaches.mergeFluidCaches(interactiveFoamXML = interactiveCaches[CONST.FOAM_FLUID_SHAPENODE], interactiveWakeXML = interactiveCaches[CONST.WAKE_FLUID_SHAPENODE])\r\n\r\n\t\tdebug(app = self.app, method = '_connectWakeAndFoamToOcean', message = 'Ocean connected....', verbose = False)", "def __create_scene(self):\n\n print 'creating a scene'\n # create scenegraph by the ifgi scene parser\n _infilepath = '../../sampledata/cornel_box.ifgi'\n # _infilepath = '../../sampledata/one_tri_full.ifgi'\n ifgireader = IfgiSceneReader.IfgiSceneReader()\n if(not ifgireader.read(_infilepath)):\n raise StandardError, ('load file [' + _infilepath + '] failed.')\n\n # add a new scene\n # A ifgi file may have many cameras, but only default camera\n # is handled.\n cam_dict = ifgireader.camera_dict_dict['default']\n\n assert(self.__ifgi_cpp_render_core != None)\n self.__ifgi_cpp_render_core.create_scene(ifgireader.material_dict_list,\\\n ifgireader.geometry_dict_list,\\\n cam_dict)\n # check the camera correctly pushed\n # print cam_dict\n # dir(ifgi_cpp_render_core)\n # ret_cam_dict = ifgi_cpp_render_core.get_camera_pydict()\n # print ret_cam_dict\n\n # self.__scenegraph.update_all_bbox()\n # -- now all primitive (TriMesh) can look up the material\n\n # # added RGBA buffer and Hit buffer to the current camera.\n # imgsz = (self.__image_xsize, self.__image_ysize, 4)\n # cur_cam.set_film('RGBA', Film.ImageFilm(imgsz, 'RGBA'))\n # # cur_cam.print_obj()", "def init():\n \n # General parameters\n vect_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/' # graphs directory\n csv_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking3/nodes_for_tracking.csv' # csv file \n dest_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking3/' # output directory\n verbose = True\n main_params = [vect_path, csv_path, dest_path, verbose]\n \n # Linking parameters\n createCSV = True \n forced_matching = True\n search_range = 10\n memory = 3\n adaptive_stop = 5 \n link_params = [createCSV, forced_matching, search_range, memory, \n adaptive_stop]\n \n # Tracking check parameters\n check = True # True to create a check image\n img_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking2/MosaicTest_t070.jpg' # image file on which to draw\n size = 1 # size of the nodes drawing\n check_params = [check, img_path, size]\n \n return main_params, link_params, check_params", "def __init__(self, parent):\n super(P5, 
self).__init__(parent)\n self.shapes = []\n self.scenes = []\n self.current_scene = 0\n self.objects = []\n self.lighting = True\n self.draw_axes = True", "def convert_realia():\n local('cd import_scripts;../bin/python import_realia.py')", "def __init__(self):\r\n self.label = \"Batch OVL to Feature\"\r\n self.description = \"Batch OVL to Feature searches a folder for OVL files from CPOF, C2PC, GCCS or similar system and converts it to a series of Feature Class for Point, Line, and Polygons.\"\r\n self.canRunInBackground = False", "def object_import(request, simulation, object_name):\n try:\n if object_name == 'function':\n parent = simulation.scenario.supply.functionset\n else:\n parent = simulation.scenario.supply.network\n query = get_query(object_name, simulation)\n user_id_set = set(query.values_list('user_id', flat=True))\n if object_name == 'link':\n # To import links, we retrieve the user ids of all centroids, crossings\n # and functions and we build mappings between ids and objects.\n centroids = get_query('centroid', simulation)\n centroid_ids = set(centroids.values_list('user_id', flat=True))\n crossings = get_query('crossing', simulation)\n crossing_ids = set(crossings.values_list('user_id', flat=True))\n node_ids = centroid_ids.union(crossing_ids)\n # Mapping between the user id and the id of the nodes.\n node_mapping = dict()\n for centroid in centroids:\n node_mapping[centroid.user_id] = centroid.id\n for crossing in crossings:\n node_mapping[crossing.user_id] = crossing.id\n functions = get_query('function', simulation)\n function_ids = set(functions.values_list('user_id', flat=True))\n # Mapping between the user id and the id of the functions.\n function_id_mapping = dict()\n # Mapping between the user id and the instance of the functions\n function_mapping = dict()\n for function in functions:\n function_id_mapping[function.user_id] = function.id\n function_mapping[function.user_id] = function\n # Convert imported file to a csv DictReader.\n encoded_file = request.FILES['import_file']\n tsv_file = StringIO(encoded_file.read().decode())\n reader = csv.DictReader(tsv_file, delimiter='\\t')\n to_be_updated = set()\n to_be_created = list()\n # Store the user_id of the imported instance to avoid two instances\n # with the same id.\n imported_ids = set()\n if object_name == 'centroid':\n # Do not import centroid with same id as a crossing.\n crossings = get_query('crossing', simulation)\n imported_ids = set(crossings.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Centroid(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'crossing':\n # Do not import crossing with same id as a centroid.\n centroids = get_query('centroid', simulation)\n imported_ids = set(centroids.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Crossing(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'function':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], 
row['expression'])\n )\n else:\n to_be_created.append(\n Function(user_id=id, name=row['name'],\n expression=row['expression'])\n )\n elif object_name == 'link':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'],\n node_mapping[int(row['origin'])],\n node_mapping[int(row['destination'])],\n function_id_mapping[int(row['function'])],\n float(row['lanes']), float(row['length']),\n float(row['speed']), float(row['capacity']))\n )\n else:\n if int(row['origin']) in node_ids \\\n and int(row['destination']) in node_ids \\\n and int(row['function']) in function_ids:\n # Ignore the links with unidentified origin,\n # destination or function.\n to_be_created.append(\n Link(user_id=id, name=row['name'],\n origin=node_mapping[int(row['origin'])],\n destination=node_mapping[int(row['destination'])],\n vdf=function_mapping[int(row['function'])],\n lanes=float(row['lanes']),\n length=float(row['length']),\n speed=float(row['speed']),\n capacity=float(row['capacity']))\n )\n if to_be_updated:\n if object_name in ('centroid', 'crossing'):\n values = set(query.values_list('user_id', 'name', 'x', 'y'))\n elif object_name == 'function':\n values = set(query.values_list('user_id', 'name', 'expression'))\n elif object_name == 'link':\n values = set(query.values_list('user_id', 'name', 'origin',\n 'destination', 'vdf_id', 'lanes',\n 'length', 'speed', 'capacity'))\n # Find the instances that really need to be updated (the values have\n # changed).\n to_be_updated = to_be_updated.difference(values)\n if object_name in ('centroid', 'crossing', 'function'):\n # Update the objects (it would be faster to delete and re-create\n # them but this would require to also change the foreign keys of\n # the links).\n for values in to_be_updated:\n # Index 0 of values is the id column i.e. 
the user_id.\n instance = query.filter(user_id=values[0])\n if object_name in ('centroid', 'crossing'):\n instance.update(name=values[1], x=values[2], y=values[3])\n else: # Function\n instance.update(name=values[1], expression=values[2])\n elif object_name == 'link':\n # Delete the links and re-create them.\n ids = list(query.values_list('id', 'user_id'))\n # Create a mapping between the user ids and the ids.\n id_mapping = dict()\n for i in range(len(values)):\n id_mapping[ids[i][1]] = ids[i][0]\n # Retrieve the ids of the links to be updated with the mapping and\n # delete them.\n to_be_updated_ids = [id_mapping[values[0]]\n for values in to_be_updated]\n with connection.cursor() as cursor:\n chunk_size = 20000\n chunks = [\n to_be_updated_ids[x:x + chunk_size]\n for x in range(0, len(to_be_updated_ids), chunk_size)\n ]\n for chunk in chunks:\n # Delete the relations first.\n cursor.execute(\n \"DELETE FROM Network_Link \"\n \"WHERE link_id IN %s;\",\n [chunk]\n )\n cursor.execute(\n \"DELETE FROM Link \"\n \"WHERE id IN %s;\",\n [chunk]\n )\n # Create a mapping between the id and the instance of the\n # functions.\n function_mapping = dict()\n for function in functions:\n function_mapping[function.id] = function\n # Now, create the updated instances with the new values.\n to_be_created += [\n Link(user_id=values[0], name=values[1], origin=values[2],\n destination=values[3], vdf=function_mapping[values[4]],\n lanes=values[5], length=values[6], speed=values[7],\n capacity=values[8])\n for values in to_be_updated\n ]\n # Create the new objects in bulk.\n # The chunk size is limited by the MySQL engine (timeout if it is too big).\n chunk_size = 10000\n chunks = [to_be_created[x:x + chunk_size]\n for x in range(0, len(to_be_created), chunk_size)]\n # Remove the orphan instances.\n if object_name == 'function':\n query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all()) \\\n .delete()\n else:\n query.model.objects.exclude(network__in=Network.objects.all()).delete()\n for chunk in chunks:\n # Create the new instances.\n query.model.objects.bulk_create(chunk, chunk_size)\n # Retrieve the newly created instances and add the many-to-many\n # relation.\n # Add the many-to-many relation.\n if object_name == 'function':\n new_instances = query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all())\n for instance in new_instances:\n instance.functionset.add(parent)\n else:\n new_instances = query.model.objects \\\n .exclude(network__in=Network.objects.all())\n for instance in new_instances:\n instance.network.add(parent)\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(\n reverse('metro:object_list', args=(simulation.id, object_name,))\n )\n except Exception as e:\n print(e)\n context = {\n 'simulation': simulation,\n 'object': object_name,\n }\n return render(request, 'metro_app/import_error.html', context)", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n 
if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def load(self):", "def loadBaseScene(self, force=False):\n logger.debug(\"Func: loadBaseScene\")\n relSceneFile = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"RelativePath\"]\n absSceneFile = os.path.join(self.projectDir, relSceneFile)\n if os.path.isfile(absSceneFile):\n nuke.scriptOpen(absSceneFile)\n return 0\n else:\n msg = \"File in Scene Manager database doesnt exist\"\n self._exception(201, msg)\n return -1, msg", "def run_vorpastat(self, *options):\n\n args = list(options) + [self._input_file, 'out.meshb']\n self._run_command(\"vorpastat\", args)", "def scene_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n thumbnails_path = get_directory('icons')\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if AM.scene_name not in thumb_list or AM.scene_name in thumb_list and AM.replace_rename == 'replace':\r\n if AM.scene_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(AM.scene_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n box.prop(AM, \"scene_name\", text=\"\")\r\n \r\n row = box.row(align = True)\r\n row.label(\"Scene name:\")\r\n row.prop(AM, \"scene_name\", text = \"\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n\r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'opengl':\r\n row = 
box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n \r\n if AM.scene_name and ((AM.scene_name not in thumb_list or AM.replace_rename == 'replace') and AM.render_type == 'opengl' or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_scene_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(AM.scene_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n box.prop(AM, \"scene_name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def importShaders(self, namespace=':'):\n self.logger.info(\"Import Shaders\")\n\n if self.data['abcShadersAttr']:\n\n abcfile = self.data['abcShadersAttr']\n \n # shotgun query for maya file\n mayafile = find_shader_package_from_shader_file(file_path=abcfile, file_type='ma')\n if mayafile != {}:\n mayafile = mayafile['ma']\n self.logger.debug(\"Found maya shader file: %s\" % mayafile)\n else:\n localfile = abcfile.replace('.abc', '.ma')\n if os.path.isfile(localfile):\n mayafile = localfile\n self.logger.debug(\"Found maya shader file: %s\" % mayafile)\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n\n if os.path.isfile(mayafile):\n try: \n imported_shaders = cmds.file(mayafile, i=True, returnNewNodes=True, renameAll=True, mergeNamespacesOnClash=True, namespace=namespace)\n self.setAttr(\"abcShaders\", \"\")\n self.logger.debug(\"Imported under %s namespace\" % namespace)\n\n # reset selection back to 
alembicHolder\n cmds.select(self.data['shapeNode'])\n self.logger.info(\"Imported : %s\" % self.data['abcShadersAttr'])\n return True\n\n except Exception, e:\n self.logger.error(\"Import Json Error : %s\" % e)\n return False\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n else:\n self.logger.info(\"Empty attribute : %s.abcShadersAttr\" % self.data['shapeNode'])\n return False", "def on_enter(self):\n\n super(BaseScene, self).on_enter()\n\n self.load_map()\n self.load_players()\n self.load_enemies()\n self.load_status_bar()\n\n self.enemies_layer.next_wave()", "def writeImports2File(self, file, indent = \" \"):\r\n # import each entity and its associated graphical file\r\n for obj in self.listNodes.keys():\r\n file.write(indent+\"from \"+obj+\" import \"+obj+\"\\n\")\r\n if not obj[0:4] == \"ASG_\":\r\n file.write(indent+\"from graph_\"+obj+\" import graph_\"+obj+\"\\n\")", "def afterLoadSceneObject(self):\n\t\tpass", "def setup_game(self):", "def start_import_tool(self):\n apps.albumsmatcher.MainFrame(app=self.app)", "def __init__(self):\n self.model = gameModel.Model()\n self.view = gameView.View()", "def init():\n # Load images here\n assets[\"teapot\"] = pg.image.load(\"teapot.png\")\n\n # Load sounds here\n assets[\"plong\"] = pg.mixer.Sound(\"plong.wav\")", "def init():\n # Load images here\n assets[\"teapot\"] = pg.image.load(\"teapot.png\")\n\n # Load sounds here\n assets[\"plong\"] = pg.mixer.Sound(\"plong.wav\")", "def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')", "def LoadAnt():\n return vtkInterface.PolyData(antfile)", "def __init__(self):\n self.brainstate = {}\n\n self.export = ['run_ai']", "def openMayaScene(self, *arg, **keys):\n mode = Mode(keys.get('show', None), keys.get('sequence', None))\n mayaSceneFile = keys.get(\"mayaSceneFile\")\n if not mayaSceneFile:\n recipePath = mode.get(Recipe.XML_FILE, keys)\n recipe = Recipe.recipeFromFile(recipePath)\n mayaSceneFile = recipe.getMayaFile()\n\n if not mayaSceneFile:\n return\n\n mayaCommand = mode.get(\"[mayaCommand]\", keys)\n mayaCommand += \" \" + mayaSceneFile + \"&\"\n OSUtils.run(mayaCommand)\n return", "def _import():\n global webbrowser, contextlib, yaml, psutil, snakemake, logger, setup_logger, xdg, datetime, _logging\n import yaml\n import psutil\n import webbrowser\n import contextlib\n import datetime\n\n import snakemake\n from snakemake.logging import logger, setup_logger, _logging\n import xdg" ]
[ "0.7213218", "0.63318026", "0.59511834", "0.5773653", "0.5763489", "0.5640468", "0.5608729", "0.5585865", "0.55745476", "0.5565132", "0.5552705", "0.5474964", "0.5472537", "0.5469946", "0.5382948", "0.5356013", "0.5355673", "0.5343927", "0.5336113", "0.53249675", "0.5310616", "0.53012156", "0.52845025", "0.5273526", "0.5262926", "0.52469516", "0.52268344", "0.5213845", "0.52136564", "0.52130127", "0.5210879", "0.52085835", "0.5208122", "0.5206048", "0.52058077", "0.5196136", "0.51822495", "0.517832", "0.5173361", "0.5172386", "0.5171183", "0.5159355", "0.51571953", "0.5150651", "0.51482385", "0.51372015", "0.5120188", "0.51089376", "0.5098114", "0.50802916", "0.50682235", "0.5066497", "0.5053157", "0.5040267", "0.5038028", "0.5033956", "0.502863", "0.50276315", "0.50180244", "0.5017492", "0.50166", "0.5012992", "0.50020754", "0.49909148", "0.49841967", "0.49762663", "0.49754256", "0.4971252", "0.49650484", "0.49642813", "0.4962937", "0.49248666", "0.49209398", "0.49209398", "0.49191076", "0.4917753", "0.4917641", "0.49042255", "0.4899209", "0.48979482", "0.48860082", "0.48831177", "0.48804867", "0.48790553", "0.48771495", "0.4869854", "0.4868925", "0.48678318", "0.48581252", "0.4845722", "0.48452634", "0.48428893", "0.4840006", "0.48334917", "0.48334917", "0.48289743", "0.48281026", "0.4826227", "0.4815729", "0.47997394" ]
0.7212844
1
import shaders into scene
def importShaders(self):
    if self.shaderPath.exists:
        self.shaderPath.imp()
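For context on the positive document above: the method reads as belonging to a Maya asset class whose shaderPath attribute wraps a shaders.ma file (compare the shaderPath negative further down, which returns mfl.mayaFile( self._path + '/shaders.ma' )). The sketch below is a minimal, hypothetical reconstruction of that guard-then-import pattern, not the dataset's own code: ShaderFile, Asset, the _path attribute and the print stub in imp() are all assumed names, and the maya.cmds.file(..., i=True) call mentioned in the comments is only what such a wrapper would typically do inside Maya.

import os


class ShaderFile(object):
    """Hypothetical stand-in for the mayaFile wrapper implied by the snippet."""

    def __init__(self, path):
        self.path = path

    @property
    def exists(self):
        # Mirrors the `.exists` check made by importShaders().
        return os.path.isfile(self.path)

    def imp(self):
        # In a real Maya pipeline this would import the file into the scene,
        # typically via maya.cmds.file(self.path, i=True); stubbed here so the
        # sketch runs outside Maya.
        print("importing shaders from %s" % self.path)


class Asset(object):
    """Hypothetical owner of importShaders(); names are illustrative only."""

    def __init__(self, path):
        self._path = path

    @property
    def shaderPath(self):
        # Parallels the shaderPath negative below, which wraps '<asset>/shaders.ma'.
        return ShaderFile(os.path.join(self._path, 'shaders.ma'))

    def importShaders(self):
        # Same guard-then-import pattern as the positive document above.
        if self.shaderPath.exists:
            self.shaderPath.imp()


if __name__ == '__main__':
    # Usage: only imports when <asset>/shaders.ma actually exists on disk.
    Asset('/path/to/asset').importShaders()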
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def init_shaders():\n global shaders\n\n vertex_shader = glCreateShader(GL_VERTEX_SHADER)\n glShaderSource(vertex_shader,open('shaders/vs-phong-interp.c','r').read())\n glCompileShader(vertex_shader)\n result = glGetShaderiv(vertex_shader, GL_COMPILE_STATUS)\n if result:\n print('Vertex shader compilation successful.')\n else:\n print('Vertex shader compilation FAILED:')\n print(glGetShaderInfoLog(vertex_shader))\n sys.exit(-1)\n\n fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(fragment_shader, open('shaders/fs-phong-interp.c','r').read())\n glCompileShader(fragment_shader)\n result = glGetShaderiv(fragment_shader, GL_COMPILE_STATUS)\n if result:\n print('Fragment shader compilation successful.')\n else:\n print('Fragment shader compilation FAILED:')\n print(glGetShaderInfoLog(fragment_shader))\n sys.exit(-1)\n\n shaders = glCreateProgram()\n glAttachShader(shaders,vertex_shader)\n glAttachShader(shaders,fragment_shader)\n glLinkProgram(shaders)", "def bs_importShaders(shaderPath, jsonPath):\n # import shaders.\n bs_mayaFile.bs_importFile(shaderPath)\n # read shader data from json file.\n with open(jsonPath) as json_data:\n shaderData = json.load(json_data)\n print shaderData\n # apply shaders.\n for each in shaderData.keys():\n # for x in shaderData[each]:\n # pm.select(shaderData[each][x],r=True)\n pm.select(shaderData[each], r=True)\n pm.windows.hyperShade(a=each)\n bs_qui.bs_displayMessage('success', 'shader import success.')\n return True", "def convert_shaders(self):\n raise NotImplementedError()", "def importShaders(self, namespace=':'):\n self.logger.info(\"Import Shaders\")\n\n if self.data['abcShadersAttr']:\n\n abcfile = self.data['abcShadersAttr']\n \n # shotgun query for maya file\n mayafile = find_shader_package_from_shader_file(file_path=abcfile, file_type='ma')\n if mayafile != {}:\n mayafile = mayafile['ma']\n self.logger.debug(\"Found maya shader file: %s\" % mayafile)\n else:\n localfile = abcfile.replace('.abc', '.ma')\n if os.path.isfile(localfile):\n mayafile = localfile\n self.logger.debug(\"Found maya shader file: %s\" % mayafile)\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n\n if os.path.isfile(mayafile):\n try: \n imported_shaders = cmds.file(mayafile, i=True, returnNewNodes=True, renameAll=True, mergeNamespacesOnClash=True, namespace=namespace)\n self.setAttr(\"abcShaders\", \"\")\n self.logger.debug(\"Imported under %s namespace\" % namespace)\n\n # reset selection back to alembicHolder\n cmds.select(self.data['shapeNode'])\n self.logger.info(\"Imported : %s\" % self.data['abcShadersAttr'])\n return True\n\n except Exception, e:\n self.logger.error(\"Import Json Error : %s\" % e)\n return False\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n else:\n self.logger.info(\"Empty attribute : %s.abcShadersAttr\" % self.data['shapeNode'])\n return False", "def loadShader(shaderpath, shadername, vertexFormatList=None, fragmentFormatlist=None):\n fragment = Shader(shaderpath + shadername + \".fsh\", FRAGMENT, True, fragmentFormatlist)\n vertex = Shader(shaderpath + shadername + \".vsh\", VERTEX, True, vertexFormatList)\n return ShaderProgram(vertex, fragment, True)", "def use(self):\r\n opengles.glUseProgram(self.program)", "def _reload_shader(self):\n self.render_pipeline.reload_shaders()\n\n self.render_pipeline.set_effect(self.terrain.get_node(), 
\"effects/terrain.yaml\", {\n \"render_gbuffer\": True,\n \"render_shadows\": False,\n\n })\n\n self.render_pipeline.set_effect(self.terrain_shadow.get_node(), \"effects/terrain_shadow.yaml\", {\n \"render_gbuffer\": False,\n \"render_shadows\": True,\n }, 5000)", "def _load_opengl(self):\r\n pass", "def compile(self):\n if not self.isCompiled():\n if self.file is not None:\n try:\n if self.tipo == VERTEX:\n self.shader = glCreateShader(GL_VERTEX_SHADER)\n else:\n self.shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(self.shader, self.file)\n glCompileShader(self.shader)\n self.compiled = True\n except:\n raise Exception(\"error al compilar el shader\")\n else:\n raise Exception(\"no se ha cargado un archivo\")\n else:\n print \"Error :: el shader ya ha sido compilado\"", "def import_scene(file_path):\n\n pass", "def link_shaders(*shaders):\n program = gl.glCreateProgram()\n for shader in shaders:\n gl.glAttachShader(program, shader)\n gl.glLinkProgram(program)\n # check linking error\n result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetProgramInfoLog(program))\n return program", "def init_shader(self):\r\n self.attrib_locs = {\r\n \"mc_vertex\": -1,\r\n \"vert_tex_coord\": -1,\r\n }\r\n self.uniform_locs = {\r\n \"model_matrix\": -1,\r\n \"view_matrix\": -1,\r\n \"proj_matrix\": -1,\r\n }\r\n vert_prog = self._compile_shader(ORTH_VERT_SOURCE, gl.GL_VERTEX_SHADER)\r\n frag_prog = self._compile_shader(\r\n ORTH_FRAG_SOURCE, gl.GL_FRAGMENT_SHADER)\r\n self.shader = gl.glCreateProgram()\r\n gl.glAttachShader(self.shader, vert_prog)\r\n gl.glAttachShader(self.shader, frag_prog)\r\n gl.glLinkProgram(self.shader)\r\n assert (gl.glGetProgramiv(self.shader, gl.GL_LINK_STATUS) ==\r\n gl.GL_TRUE), (\r\n \"Error: %s\" % (gl.glGetProgramInfoLog(self.shader)))\r\n\r\n self.attrib_locs = {\r\n name: gl.glGetAttribLocation(self.shader, name)\r\n for name in self.attrib_locs\r\n }\r\n self.uniform_locs = {\r\n name: gl.glGetUniformLocation(self.shader, name)\r\n for name in self.uniform_locs\r\n }\r\n\r\n # Load vertices for final ortho view\r\n self.vao = gl.glGenVertexArrays(1)\r\n gl.glBindVertexArray(self.vao)\r\n self.buffers['mc_vertex'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['mc_vertex'])\r\n\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(ORTH_VERTICES),\r\n ORTH_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['mc_vertex'], 4,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['mc_vertex'])\r\n\r\n self.buffers['vert_tex_coord'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['vert_tex_coord'])\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(TEXTURE_VERTICES),\r\n TEXTURE_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['vert_tex_coord'], 2,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['vert_tex_coord'])\r\n gl.glActiveTexture(gl.GL_TEXTURE0)", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n 
objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def initializeGL(self):\n # background color\n gl.glClearColor(0, 0, 0, 0)\n # create a Vertex Buffer Object with the specified data\n self.vbo = glvbo.VBO(self.data)\n # compile the vertex shader\n vs = compile_vertex_shader(VS)\n # compile the fragment shader\n fs = compile_fragment_shader(FS)\n # compile the vertex shader\n self.shaders_program = link_shader_program(vs, fs)\n vs2 = compile_vertex_shader(VS2)\n fs2 = compile_fragment_shader(FS2)\n self.my_shaders_program = link_shader_program(vs2, fs2)", "def _build_shaders(self, program):\n\n # Check if we have at least something to attach\n if not self._verts:\n raise ValueError(\"No vertex shader has been given\")\n if not self._frags:\n raise ValueError(\"No fragment shader has been given\")\n\n log.debug(\"GPU: Attaching shaders to program\")\n\n # Attach shaders\n attached = gl.glGetAttachedShaders(program)\n shaders = self._verts + self._frags + self._geoms\n for shader in shaders: #self._verts:\n if shader.need_update:\n if shader.handle in attached:\n gl.glDetachShader(program, handle)\n shader.activate()\n if isinstance(shader, GeometryShader):\n if shader.vertices_out is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_VERTICES_OUT_EXT,\n shader.vertices_out)\n if shader.input_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_INPUT_TYPE_EXT,\n shader.input_type)\n if shader.output_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_OUTPUT_TYPE_EXT,\n shader.output_type)\n gl.glAttachShader(program, shader.handle)\n shader._program = self", "def set_shaders(self, vert, frag):\n self._linked = False\n # Create temporary shader objects\n vert_handle = gl.glCreateShader(gl.GL_VERTEX_SHADER)\n frag_handle = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)\n # For both vertex and fragment 
shader: set source, compile, check\n for code, handle, type_ in [(vert, vert_handle, 'vertex'), \n (frag, frag_handle, 'fragment')]:\n gl.glShaderSource(handle, code)\n gl.glCompileShader(handle)\n status = gl.glGetShaderParameter(handle, gl.GL_COMPILE_STATUS)\n if not status:\n errors = gl.glGetShaderInfoLog(handle)\n errormsg = self._get_error(code, errors, 4)\n raise RuntimeError(\"Shader compilation error in %s:\\n%s\" % \n (type_ + ' shader', errormsg))\n # Attach shaders\n gl.glAttachShader(self._handle, vert_handle)\n gl.glAttachShader(self._handle, frag_handle)\n # Link the program and check\n gl.glLinkProgram(self._handle)\n if not gl.glGetProgramParameter(self._handle, gl.GL_LINK_STATUS):\n raise RuntimeError('Program linking error:\\n%s'\n % gl.glGetProgramInfoLog(self._handle))\n # Now we can remove the shaders. We no longer need them and it\n # frees up precious GPU memory:\n # http://gamedev.stackexchange.com/questions/47910\n gl.glDetachShader(self._handle, vert_handle)\n gl.glDetachShader(self._handle, frag_handle)\n gl.glDeleteShader(vert_handle)\n gl.glDeleteShader(frag_handle)\n # Now we know what variables will be used by the program\n self._unset_variables = self._get_active_attributes_and_uniforms()\n self._handles = {}\n self._known_invalid = set()\n self._linked = True", "def transfer_shaders(source, target):\n if isinstance(source, pm.nt.Transform):\n source_shape = source.getShape()\n else:\n source_shape = source\n\n if isinstance(target, pm.nt.Transform):\n target_shape = target.getShape()\n else:\n target_shape = target\n\n # get the shadingEngines\n shading_engines = source_shape.outputs(type=pm.nt.ShadingEngine)\n\n data_storage = []\n\n # get the assigned faces\n for shading_engine in shading_engines:\n faces = pm.sets(shading_engine, q=1)\n for faceGroup in faces:\n str_face = str(faceGroup)\n # replace the objectName\n new_face = \\\n str_face.replace(source_shape.name(), target_shape.name())\n data_storage.append((shading_engine.name(), new_face))\n\n for data in data_storage:\n shading_engine = data[0]\n new_face = data[1]\n pm.select(new_face)\n # now assign the newFaces to the set\n pm.sets(shading_engine, fe=1)", "def _on_unload_scene_shaders(self):\n\n artellapipe.ShadersMgr().unload_shaders()", "def __prepare_shaders(self, rotation_matrix=None, light_matrix=None,\n depth=True):\n self.__sh.add_attribute(0, self.__mean_face, 'mean_position')\n self.__sh.bind_buffer()\n\n self.__sh.use_shaders()\n\n self.__sh.bind_uniform_matrix(light_matrix.dot(rotation_matrix),\n 'light_matrix')\n if not depth:\n self.__sh.bind_uniform_matrix(rotation_matrix, 'rotation_matrix')\n self.__sh.bind_uniform_vector(self.__face.light_cartesian,\n 'light_vector')\n coefficients_amount = len(self.__face.coefficients)\n indices = -ones(199, dtype='i')\n indices[:coefficients_amount] = array(range(coefficients_amount))\n self.__sh.bind_uniform_ints(indices, 'indices')\n\n coefficients = zeros(199, dtype='f')\n coefficients[:coefficients_amount] = self.__face.coefficients\n self.__sh.bind_uniform_floats(coefficients, 'coefficients')\n\n glActiveTexture(GL_TEXTURE0)\n self.__sh.bind_texture(0)\n if not depth:\n glActiveTexture(GL_TEXTURE1)\n self.__sh.bind_texture(1)", "def __init__(self, vertex_source, fragment_source):\n self.glid = None\n vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n if vert and frag:\n self.glid = GL.glCreateProgram() # pylint: disable=E1111\n 
GL.glAttachShader(self.glid, vert)\n GL.glAttachShader(self.glid, frag)\n GL.glLinkProgram(self.glid)\n GL.glDeleteShader(vert)\n GL.glDeleteShader(frag)\n status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n if not status:\n print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n sys.exit(1)", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = 
load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n 
sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def __init__(self, vertex_source, fragment_source):\n self.glid = None\n vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n if vert and frag:\n self.glid = GL.glCreateProgram() # pylint: disable=E1111\n GL.glAttachShader(self.glid, vert)\n GL.glAttachShader(self.glid, frag)\n GL.glLinkProgram(self.glid)\n GL.glDeleteShader(vert)\n GL.glDeleteShader(frag)\n status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n if not status:\n #print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n GL.glDeleteProgram(self.glid)\n self.glid = None", "def addShaderFromSourceFile(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def setShader(self, *args):\n return _osgAnimation.RigTransformHardware_setShader(self, *args)", "def __init__(self, vertex_source, fragment_source):\n self.glid = None\n vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n if vert and frag:\n self.glid = GL.glCreateProgram() # pylint: disable=E1111\n GL.glAttachShader(self.glid, vert)\n GL.glAttachShader(self.glid, frag)\n GL.glLinkProgram(self.glid)\n GL.glDeleteShader(vert)\n GL.glDeleteShader(frag)\n status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n if not status:\n print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n GL.glDeleteProgram(self.glid)\n self.glid = None", "def convert_shaders(convert, shaders):\n \n # New version of the shaders\n out = []\n \n if convert == 'es2':\n \n for isfragment, shader in enumerate(shaders):\n has_version = False\n has_prec_float = False\n has_prec_int = False\n lines = []\n # Iterate over lines\n for line in shader.lstrip().splitlines():\n if line.startswith('#version'):\n has_version = True\n continue\n if line.startswith('precision '):\n has_prec_float = has_prec_float or 'float' in line\n has_prec_int 
= has_prec_int or 'int' in line\n lines.append(line.rstrip())\n # Write\n # BUG: fails on WebGL (Chrome)\n # if True:\n # lines.insert(has_version, '#line 0')\n if not has_prec_float:\n lines.insert(has_version, 'precision highp float;')\n if not has_prec_int:\n lines.insert(has_version, 'precision highp int;')\n # BUG: fails on WebGL (Chrome)\n # if not has_version:\n # lines.insert(has_version, '#version 100')\n out.append('\\n'.join(lines))\n \n elif convert == 'desktop':\n \n for isfragment, shader in enumerate(shaders):\n has_version = False\n lines = []\n # Iterate over lines\n for line in shader.lstrip().splitlines():\n has_version = has_version or line.startswith('#version')\n if line.startswith('precision '):\n line = ''\n for prec in (' highp ', ' mediump ', ' lowp '):\n line = line.replace(prec, ' ')\n lines.append(line.rstrip())\n # Write\n if not has_version:\n lines.insert(0, '#version 120\\n')\n out.append('\\n'.join(lines))\n \n else:\n raise ValueError('Cannot convert shaders to %r.' % convert)\n \n return tuple(out)", "def add_vertex_main(self, *args, **kwargs):\n kwargs['shader'] = 'vertex'\n self.add_main(*args, **kwargs)", "def init(filename):\n global trackball, flashlight, vertex_buffer, normal_buffer, color_buffer, colors, vertices, normals\n\n # initialize quaternions for the light and trackball\n flashlight = quat.for_rotation(0.0,vector(1.0,0.0,0.0))\n trackball = quat.for_rotation(0.0,vector(1.0,0.0,0.0))\n\n # read the .OBJ file into VBOs\n scene.read(filename)\n vertices,normals,colors = scene.compile()\n \n vertex_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, vertex_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(vertices)*4, \n (c_float*len(vertices))(*vertices), GL_STATIC_DRAW)\n\n normal_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, normal_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(normals)*4, \n (c_float*len(normals))(*normals), GL_STATIC_DRAW)\n\n color_buffer = glGenBuffers(1)\n glBindBuffer (GL_ARRAY_BUFFER, color_buffer)\n glBufferData (GL_ARRAY_BUFFER, len(colors)*4, \n (c_float*len(colors))(*colors), GL_STATIC_DRAW)\n\n\n # set up the object shaders\n init_shaders()\n\n glEnable (GL_DEPTH_TEST)", "def compileShaders(self):\n if self.flatShader is not None: self.flatShader.destroy()\n if self.dataShader is not None: self.dataShader.destroy()\n\n self.activeShader = None\n\n fslgl.glmesh_funcs.compileShaders(self)", "def initializeGL(self):\n # background color\n gl.glClearColor(0.8, 0.8, 0.8, 0)\n # Make initial data array.\n # compile the vertex shader\n vs = compile_shader(VERTEX, gl.GL_VERTEX_SHADER)\n # compile the geometry shader\n gs = compile_shader(GEOMETRY, gl.GL_GEOMETRY_SHADER)\n # compile the fragment shader\n fs = compile_shader(FRAGMENT, gl.GL_FRAGMENT_SHADER)\n # Link the programs.\n self.render_program = link_shaders(vs, gs, fs)\n # Compile the compute shader\n cs = compile_shader(COMPUTE, gl.GL_COMPUTE_SHADER)\n # Create the compute shader buffers.\n self.makeBuffers()\n #self.vbo = glvbo.VBO(self.attributes)\n self.vbo = gl.glGenBuffers(1)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, self.attributes.nbytes,\n self.attributes, gl.GL_DYNAMIC_COPY)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)\n\n self.ssbo = gl.glGenBuffers(1)\n gl.glBindBufferBase(gl.GL_SHADER_STORAGE_BUFFER, 1, self.ssbo)\n gl.glBufferData(gl.GL_SHADER_STORAGE_BUFFER, self.velocities.nbytes,\n self.velocities, gl.GL_DYNAMIC_COPY)\n self.compute_program = link_shaders(cs)", "def shaderPath(self):\n\t\treturn 
mfl.mayaFile( self._path + '/shaders.ma' )", "def compileShaders(self):\n raise NotImplementedError('compileShaders must be implemented by '\n '{} subclasses'.format(type(self).__name__))", "def shaders(self):\n\n shaders = []\n shaders.extend(self._verts)\n shaders.extend(self._frags)\n shaders.extend(self._geoms)\n return shaders", "def addShaderFromSourceCode(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def bs_getShaders(obj):\n pm.select(obj)\n pm.windows.hyperShade(shaderNetworksSelectMaterialNodes=True)\n return pm.ls(sl=True) # Returns all shaders associated with the object (shape, face etc)", "def compile(self, mode, shader):\n holder = self.holderDepend( mode.cache.holder(self,None) )\n # TODO: depend on shader.material as well...\n # TODO: the compiled shader needs to depend on *everything* \n # down the set of objects...\n program = glCreateProgram()\n holder.data = program\n subShaders = []\n for shader in self.shaders:\n # TODO: cache links...\n subShader = shader.compile()\n if subShader:\n glAttachShader(program, subShader )\n subShaders.append( subShader )\n elif shader.source:\n log.warn( 'Failure compiling: %s %s', shader.compileLog, shader.url or shader.source )\n if len(subShaders) == len(self.shaders):\n glLinkProgram(program)\n glUseProgram( program )\n # TODO: retrieve maximum texture count and restrict to that...\n i = 0\n for texture in self.textures:\n if texture.bind( self, mode, i ):\n i += 1\n \n glValidateProgram( program )\n validation = glGetProgramiv( program, GL_VALIDATE_STATUS )\n if validation == GL_FALSE:\n self.compileLog += \"\"\"Validation failure (%s): %s\"\"\"%(\n validation,\n glGetProgramInfoLog( program ),\n )\n program = False \n else:\n link_status = glGetProgramiv( program, GL_LINK_STATUS )\n if link_status == GL_FALSE:\n self.compileLog += \"\"\"Link failure (%s): %s\"\"\"%(\n link_status,\n glGetProgramInfoLog( program ),\n )\n program = False\n for subShader in subShaders:\n glDeleteShader( subShader )\n holder.data = program\n return program\n else:\n log.debug( 'Not done loading shader source yet' )\n holder.data = 0\n return None", "def get_shader_codes(self):\n vs = VS_TEMPLATE\n fs = FS_TEMPLATE\n \n # Shader headers\n vs_header = self.get_header('vertex')\n fs_header = self.get_header('fragment')\n \n # Varyings\n for varying in self.varyings:\n s1, s2 = get_varying_declarations(varying)\n vs_header += s1\n fs_header += s2\n \n # vs_header += \"\".join(self.vs_headers)\n # fs_header += \"\".join(self.fs_headers)\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_HEADER%\", vs_header)\n fs = fs.replace(\"%FRAGMENT_HEADER%\", fs_header)\n \n # Vertex and fragment main code\n vs_main = self.get_main('vertex')\n fs_main = self.get_main('fragment')\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_MAIN%\", vs_main)\n fs = fs.replace(\"%FRAGMENT_MAIN%\", fs_main)\n \n # frag color or frag data\n if self.fragdata is None:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragColor = out_color;\"\"\")\n else:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragData[%d] = out_color;\"\"\" % self.fragdata)\n \n # Make sure there are no Windows carriage returns\n vs = vs.replace(b\"\\r\\n\", b\"\\n\")\n fs = fs.replace(b\"\\r\\n\", b\"\\n\")\n \n # OLDGLSL does not know the texture function\n if not OLDGLSL:\n fs = fs.replace(\"texture1D(\", \"texture(\" % 2)\n fs = fs.replace(\"texture2D(\", \"texture(\" % 2)\n \n # 
set default color\n fs = fs.replace('%DEFAULT_COLOR%', str(self.default_color))\n \n # replace GLSL version header\n vs = vs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n fs = fs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n \n # replace GLSL precision header\n vs = vs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n fs = fs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n \n return vs, fs", "def get_shaders(self, nodes):\n shaders = []\n # Fill the assigned shader list\n for node in nodes:\n shader = mc.listConnections(\"{0}.instObjGroups[0]\".format(node))\n if shader is not None:\n shaders.append(shader)\n else:\n shaders.append([])\n return shaders", "def _prepare_gl(self):\n # init gl\n shader = Shader()\n shader.attachShader(GL_VERTEX_SHADER, VERTEX_SHADER)\n shader.attachShader(GL_FRAGMENT_SHADER, FRAGMENT_SHADER)\n shader.linkProgram()\n self.shader = shader\n\n self._gl_uniforms = {}\n # cache uniform locations (much faster)\n self._gl_uniforms['tex'] = self._uloc('tex')\n self._gl_uniforms['color'] = self._uloc('color')\n self._gl_uniforms['mat_projection'] = self._uloc('mat_projection')\n self._gl_uniforms['mat_modelview'] = self._uloc('mat_modelview')\n self._gl_uniforms['mat_real_projection'] = self._uloc('mat_real_projection')\n self.vao_id = glGenVertexArrays(1)\n self.vbo_id = glGenBuffers(2)", "def addShader(self, QOpenGLShader): # real signature unknown; restored from __doc__\n return False", "def update_project(_):\n\n # Compile all OSL Script nodes\n stdosl_path = path_util.get_stdosl_paths()\n compiler = asr.ShaderCompiler(stdosl_path)\n q = asr.ShaderQuery()\n for script in bpy.data.texts:\n osl_bytecode = osl_utils.compile_osl_bytecode(compiler,\n script)\n if osl_bytecode is not None:\n q.open_bytecode(osl_bytecode)\n\n node_data = osl_utils.parse_shader(q)\n\n node_name, node_category, node_classes = osl_utils.generate_node(node_data,\n AppleseedOSLScriptNode)\n\n for cls in node_classes:\n safe_register_class(cls)\n\n else:\n logger.debug(f\"appleseed: Shader {script.name} did not compile\")", "def __init__(self, shader_program):\n self.tessellate(20)\n\n self.k_ambient = np.array([0.3, 0.3, 0.21], dtype=np.float32)\n self.k_diffuse = np.array([0.4, 0.5, 0.35], dtype=np.float32)\n self.k_specular = np.array([0.3, 0.3, 0.3], dtype=np.float32)\n self.shininess = 7.0\n\n self.set_buffers(shader_program)", "def add_fragment_main(self, *args, **kwargs):\n kwargs['shader'] = 'fragment'\n self.add_main(*args, **kwargs)", "def _addShaderMenuItems(ned, node):\n pass", "def setup(self, shader_program):\n self.setup_view(shader_program)\n self.setup_projection(shader_program)", "def compile_vertex_shader(source):\n vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)\n gl.glShaderSource(vertex_shader, source)\n gl.glCompileShader(vertex_shader)\n # check compilation error\n result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))\n return vertex_shader", "def compile_vertex_shader(source):\n vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)\n gl.glShaderSource(vertex_shader, source)\n gl.glCompileShader(vertex_shader)\n # check compilation error\n result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))\n return vertex_shader", "def set_shader(self, shader):\r\n\r\n self.shader = shader\r\n for b in self.buf:\r\n b.shader = shader", "def surfaceShaderList(*args, 
add: name=None, remove: name=None, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def link_shader_program(vertex_shader):\n program = gl.glCreateProgram()\n gl.glAttachShader(program, vertex_shader)\n gl.glLinkProgram(program)\n # check linking error\n result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetProgramInfoLog(program))\n return program", "def applyShader(name, obj, color=(.5,.5,.5), sType='lambert', sSet='__none__'):\n ##print 'evaluating'\n if sSet=='__none__':\n sSet=name+'SG'\n ##print 'no SG set given'\n\n if pm.objExists(name)==0 and pm.objExists(sSet)==0:\n ##print 'creating shader'\n myShader=pm.shadingNode(sType, asShader=1, name=name)\n pm.sets(n=sSet, renderable=1, empty=1, noSurfaceShader=1)\n if sType=='surfaceShader':\n myAt='.outColor'\n else:\n myAt='.color'\n pm.connectAttr(myShader+myAt, sSet+'.surfaceShader')\n pm.setAttr(myShader+myAt, color)\n pm.sets(sSet, fe=obj)\n return name", "def link_shader_program(vertex_shader, fragment_shader):\n program = gl.glCreateProgram()\n gl.glAttachShader(program, vertex_shader)\n gl.glAttachShader(program, fragment_shader)\n gl.glLinkProgram(program)\n # check linking error\n result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetProgramInfoLog(program))\n return program", "def recompile(self):\n\n self.vaos = []\n try:\n self.program, uniforms = self.build_prog(self.gl)\n self.u_time, self.u_width, self.u_height = uniforms\n vao = GLUtil.screen_vao(self.gl, self.program)\n self.vaos.append(vao)\n\n self.compute, uniforms, buffers = self.build_cs(self.gl)\n self.u_cstime, self.u_cswidth, self.u_csheight = uniforms\n self.buf_in, self.buf_out = buffers\n\n self.set_gpu_wh(width, height)\n\n self.gx, self.gy = int(width / 8), int(height / 8)\n self.set_gpu_time()\n\n log(\"[Renderer] shader recompiled.\")\n\n except Exception as e:\n log(e)", "def addCacheableShaderFromSourceFile(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def getVertexShader(self):\n return self.vshader", "def printShader(self):\n print self.file", "def updateShaderState(self):\n\n dopts = self.opts\n copts = self.canvas.opts\n lightPos = None\n flatColour = dopts.getConstantColour()\n useNegCmap = (not dopts.useLut) and dopts.useNegativeCmap\n\n if self.threedee:\n lightPos = np.array(copts.lightPos)\n lightPos *= (copts.zoom / 100.0)\n else:\n lightPos = None\n\n if dopts.useLut:\n delta = 1.0 / (dopts.lut.max() + 1)\n cmapXform = transform.scaleOffsetXform(delta, 0.5 * delta)\n else:\n cmapXform = self.cmapTexture.getCoordinateTransform()\n\n fslgl.glmesh_funcs.updateShaderState(\n self,\n useNegCmap=useNegCmap,\n cmapXform=cmapXform,\n flatColour=flatColour,\n lightPos=lightPos)", "def setFragmentShader(self, fragment):\n if isinstance(fragment, Shader):\n if fragment.getType() == FRAGMENT:\n self.fshader = fragment\n else:\n raise Exception(\"se esperaba un fragment shader, en cambio se paso un vertex shader\")\n else:\n raise Exception(\"el fragment shader debe ser del tipo Shader\")", "def shadingNode(*args, asLight: bool=True, asPostProcess: bool=True, asRendering: bool=True,\n asShader: bool=True, asTexture: bool=True, asUtility: bool=True,\n isColorManaged: bool=True, name: AnyStr=\"\", parent: AnyStr=\"\", shared:\n bool=True, skipSelect: bool=True, **kwargs)->AnyStr:\n pass", "def 
setVertexShader(self, vertex):\n if isinstance(vertex, Shader):\n if vertex.getType() == VERTEX:\n self.vshader = vertex\n else:\n raise Exception(\"se esperaba un vertex shader, en cambio se paso un fragment shader\")\n else:\n raise Exception(\"el vertex shader debe ser del tipo Shader\")", "def dataShader(self):\n\t\treturn self._shader", "def show_vertex_colors():\n if bpy.app.version > (2, 80, 0):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n space.shading.type = 'SOLID'\n space.shading.color_type = 'VERTEX'", "def _create(self):\n\n log.debug(\"GPU: Creating program\")\n\n # Check if program has been created\n if self._handle <= 0:\n self._handle = gl.glCreateProgram()\n if not self._handle:\n raise ValueError(\"Cannot create program object\")\n\n self._build_shaders(self._handle)\n\n log.debug(\"GPU: Linking program\")\n\n # Link the program\n gl.glLinkProgram(self._handle)\n if not gl.glGetProgramiv(self._handle, gl.GL_LINK_STATUS):\n print(gl.glGetProgramInfoLog(self._handle))\n raise ValueError('Linking error')\n\n # Activate uniforms\n active_uniforms = [name for (name,gtype) in self.active_uniforms]\n for uniform in self._uniforms.values():\n if uniform.name in active_uniforms:\n uniform.active = True\n else:\n uniform.active = False\n\n # Activate attributes\n active_attributes = [name for (name,gtype) in self.active_attributes]\n for attribute in self._attributes.values():\n if attribute.name in active_attributes:\n attribute.active = True\n else:\n attribute.active = False", "def compile_fragment_shader(source):\n fragment_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)\n gl.glShaderSource(fragment_shader, source)\n gl.glCompileShader(fragment_shader)\n # check compilation error\n result = gl.glGetShaderiv(fragment_shader, gl.GL_COMPILE_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetShaderInfoLog(fragment_shader))\n return fragment_shader", "def __init__(self, scene = base.render, ambient = 0.2, hardness = 16, fov = 40, near = 10, far = 100):\n \n # Read and store the function parameters\n self.scene = scene\n self.__ambient = ambient\n self.__hardness = hardness\n \n # By default, mark every object as textured.\n self.flagTexturedObject(self.scene)\n \n # Create the buffer plus a texture to store the output in\n buffer = createOffscreenBuffer(-3)\n depthmap = Texture()\n buffer.addRenderTexture(depthmap, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)\n \n # Set the shadow filter if it is supported\n if(base.win.getGsg().getSupportsShadowFilter()):\n depthmap.setMinfilter(Texture.FTShadow)\n depthmap.setMagfilter(Texture.FTShadow) \n \n # Make the camera\n self.light = base.makeCamera(buffer)\n self.light.node().setScene(self.scene)\n self.light.node().getLens().setFov(fov)\n self.light.node().getLens().setNearFar(near, far)\n\n # Put a shader on the Light camera.\n lci = NodePath(PandaNode(\"lightCameraInitializer\"))\n lci.setShader(loader.loadShader(\"caster.sha\"))\n self.light.node().setInitialState(lci.getState())\n\n # Put a shader on the Main camera.\n mci = NodePath(PandaNode(\"mainCameraInitializer\"))\n mci.setShader(loader.loadShader(\"softshadow.sha\"))\n base.cam.node().setInitialState(mci.getState())\n\n # Set up the blurring buffers, one that blurs horizontally, the other vertically\n #blurXBuffer = makeFilterBuffer(buffer, \"Blur X\", -2, loader.loadShader(\"blurx.sha\"))\n #blurYBuffer = makeFilterBuffer(blurXBuffer, \"Blur Y\", -1, 
loader.loadShader(\"blury.sha\"))\n\n # Set the shader inputs\n self.scene.setShaderInput(\"light\", self.light)\n #self.scene.setShaderInput(\"depthmap\", blurYBuffer.getTexture())\n self.scene.setShaderInput(\"depthmap\", buffer.getTexture())\n self.scene.setShaderInput(\"props\", ambient, hardness, 0, 1)", "def _createShaderMenuItems(ned, node):\n pass", "def bs_exportShaders(shaderExpPath, jsonPath):\n # get all shading engines.\n allShdEngines = pm.ls(type='shadingEngine')\n allShdEng = list()\n [allShdEng.append(each.name()) for each in allShdEngines]\n filtShdEng = list(set(allShdEng) - {'initialParticleSE', 'initialShadingGroup'})\n # convert shading engines in PyNode.\n shadingEngines = list()\n [shadingEngines.append(pm.PyNode(each)) for each in filtShdEng]\n # get geometries where shading engine is applied.\n shadingInformation = dict()\n for each in shadingEngines:\n appliedMesh = list()\n [appliedMesh.append(x.name()) for x in each.listConnections(type='mesh')]\n shadingInformation[each.name()] = appliedMesh\n # export applied data in json file.\n with open(jsonPath, 'w') as outfile:\n json.dump(shadingInformation, outfile, indent=4)\n # select and export all shading engines.\n pm.select(shadingInformation.keys(), r=True, ne=True)\n pm.cmds.file(shaderExpPath, op=\"v=0;\", typ='mayaAscii', pr=False, es=True)\n bs_qui.bs_displayMessage('success', 'Shaders exported successful')", "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def getFragmentShader(self):\n return self.fshader", "def _load_opengl(self):\r\n opengles.glGenTextures(4, ctypes.byref(self._tex), 0)\r\n from pi3d.Display import Display\r\n if Display.INSTANCE:\r\n Display.INSTANCE.textures_dict[str(self._tex)] = [self._tex, 0]\r\n opengles.glBindTexture(GL_TEXTURE_2D, self._tex)\r\n RGBv = GL_RGBA if self.alpha else GL_RGB\r\n opengles.glTexImage2D(GL_TEXTURE_2D, 0, RGBv, self.ix, self.iy, 0, RGBv,\r\n GL_UNSIGNED_BYTE,\r\n ctypes.string_at(self.image, len(self.image)))\r\n opengles.glEnable(GL_TEXTURE_2D)\r\n opengles.glGenerateMipmap(GL_TEXTURE_2D)\r\n opengles.glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\r\n if self.mipmap:\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,\r\n GL_LINEAR_MIPMAP_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,\r\n GL_LINEAR)\r\n else:\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,\r\n GL_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,\r\n GL_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,\r\n GL_MIRRORED_REPEAT)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,\r\n GL_MIRRORED_REPEAT)", "def update_shader_presets_path(shader_presets_filepath):\n shader_presets_abs_path = _path_utils.get_abs_path(shader_presets_filepath)\n\n if _shader_presets.is_library_initialized(shader_presets_abs_path):\n lprint(\"I Shader presets library is up-to date, no update will happen!\")\n return\n\n # CLEAR INVENTORY AND CACHE\n _shader_presets.clear()\n\n # ADD DEFAULT PRESET ITEM \"<none>\" INTO INVENTORY\n _shader_presets.add_section(\"<none>\", \"<none>\", \"\", None)\n\n if os.path.isfile(shader_presets_abs_path):\n\n presets_container = _pix.get_data_from_file(shader_presets_abs_path, ' ')\n\n # ADD ALL SHADER PRESET ITEMS FROM FILE INTO INVENTORY\n if presets_container:\n\n # load all supported effects from dump file of python set or dictionary, where keys represent supported 
effects\n # If file is not found then generate all combinations as before.\n supported_effects_dict = None\n supported_effects_path = os.path.join(_path_utils.get_addon_installation_paths()[0], \"supported_effects.bin\")\n if os.path.isfile(supported_effects_path):\n try:\n supported_effects_dict = pickle.load(open(supported_effects_path, mode=\"rb\"))\n except PermissionError:\n lprint(\"W Can't load supported effects file (persmission denied), please ensure read/write permissions for:\\n\\t %r\\n\\t \"\n \"Without supported effects file invalid combinations of shader and flavors can be created!\",\n (os.path.dirname(supported_effects_path),),\n report_warnings=1)\n else:\n lprint(\"W Supported effects file is missing! Make sure latest SCS Blender Tools is installed.\\n\\t \"\n \"Without supported effects file invalid combinations of shader and flavors can be created!\",\n report_warnings=1)\n\n # sort sections to shaders and flavors\n shaders = []\n flavors = {}\n for section in presets_container:\n if section.type == \"Shader\":\n shaders.append(section)\n elif section.type == \"Flavor\":\n flavors[section.get_prop_value(\"Type\")] = section\n\n for shader in shaders:\n unique_names = []\n shader_flavors = shader.get_prop_value(\"Flavors\")\n\n # create new preset item\n shader_preset_name = shader.get_prop_value(\"PresetName\")\n shader_preset_effect = shader.get_prop_value(\"Effect\")\n unique_names.append(\"\")\n _shader_presets.add_section(shader_preset_effect, shader_preset_name, \"\", shader)\n\n if shader_flavors:\n\n for j, flavor_types in enumerate(shader_flavors):\n\n # create new flavor item\n _shader_presets.add_flavor(shader_preset_name)\n\n new_unique_names = []\n for i, flavor_type in enumerate(flavor_types.split(\"|\")):\n\n if flavor_type not in flavors:\n lprint(\"D Flavor used by shader preset, but not defined: %s\", (flavor_type,))\n continue\n\n # create new flavor variant item (there can be more variants eg. 
\"BLEND_ADD|BLEND_OVER\")\n flavor_variant_name = flavors[flavor_type].get_prop_value(\"Name\")\n _shader_presets.add_flavor_variant(shader_preset_name, flavor_type, flavor_variant_name)\n\n # modify and save section as string into cache\n for unique_name in unique_names:\n\n new_unique_str = unique_name + \".\" + flavor_variant_name\n new_full_effect_name = shader_preset_effect + new_unique_str\n\n # check if this shader-flavor combination can exists, if not skip it\n if supported_effects_dict and new_full_effect_name not in supported_effects_dict:\n lprint(\"S Marking none existing effect as dirty: %r\", (new_full_effect_name,))\n is_dirty = True\n else:\n is_dirty = False\n\n section = _shader_presets.get_section(shader_preset_name, unique_name)\n\n for flavor_section in flavors[flavor_type].sections:\n\n flavor_section_tag = flavor_section.get_prop_value(\"Tag\")\n # check if current flavor section already exists in section,\n # then override props and sections directly otherwise add flavor section\n for subsection in section.sections:\n\n subsection_tag = subsection.get_prop_value(\"Tag\")\n if subsection_tag and subsection_tag == flavor_section_tag:\n\n subsection.props = flavor_section.props\n subsection.sections = flavor_section.sections\n break\n\n else:\n section.sections.append(flavor_section)\n\n new_unique_names.append(new_unique_str)\n assert section.set_prop_value(\"Effect\", shader_preset_effect + new_unique_str)\n _shader_presets.add_section(shader_preset_effect, shader_preset_name, new_unique_str, section, is_dirty=is_dirty)\n\n unique_names.extend(new_unique_names)\n\n # now as we built library it's time to clean it up of dirty items (eg. none existing effect combinations) and\n # set path from which this library was initialized\n _shader_presets.set_library_initialized(shader_presets_abs_path)\n\n update_item_in_file('Paths.ShaderPresetsFilePath', shader_presets_filepath)", "def _executeShader(self, node, threadsX, threadsY, threadsZ=1):\n sattr = node.get_attrib(ShaderAttrib)\n Globals.base.graphicsEngine.dispatch_compute(\n (threadsX, threadsY, threadsZ), sattr, Globals.base.win.get_gsg())", "def __init__(self):\n self.index = self._generateUID()\n\n DebugObject.__init__(self, \"ShadowSource-\" + str(self.index))\n ShaderStructElement.__init__(self)\n\n self.valid = False\n self.camera = Camera(\"ShadowSource-\" + str(self.index))\n self.cameraNode = NodePath(self.camera)\n self.cameraNode.reparentTo(Globals.render)\n self.resolution = 1024\n self.atlasPos = Vec2(0)\n self.doesHaveAtlasPos = False\n self.sourceIndex = 0\n self.mvp = UnalignedLMatrix4f()\n self.sourceIndex = -1\n self.nearPlane = 0.0\n self.farPlane = 1000.0\n self.converterYUR = None\n self.transforMat = TransformState.makeMat(\n Mat4.convertMat(Globals.base.win.getGsg().getInternalCoordinateSystem(),\n CSZupRight))", "def initialize(self, gl):\n\n self.waterProgram = self.linkProgram(gl, 'water')\n self.waterVBO = QOpenGLBuffer(QOpenGLBuffer.VertexBuffer)\n assert self.waterVBO.create(), \"Can't create water vertex buffer =\\\\\"\n self.waterVBO.setUsagePattern(QOpenGLBuffer.DynamicDraw)\n\n self.waterRefractionProgram = self.linkProgram(gl, 'water-refraction')\n self.refractionFramebuffer = self.createFramebuffer(gl, 512, depth=True)\n self.refractionNormalMap = self.createTexture(gl, wrapMode=QOpenGLTexture.Repeat, filename='normalmap.bmp')\n\n self.depthProgram = self.linkProgram(gl, 'depth')\n self.depthFramebuffer = self.createFramebuffer(gl, 512)\n self.depthTexture = self.createTexture(gl, 
self.depthFramebuffer.width(), format=QOpenGLTexture.D32F, allocate=False,\n GL_TEXTURE_COMPARE_MODE=gl.GL_COMPARE_REF_TO_TEXTURE,\n GL_TEXTURE_COMPARE_FUNC=gl.GL_LESS)\n self.depthTexture.bind()\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT32, \n self.depthFramebuffer.width(), self.depthFramebuffer.height(), \n 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, None)\n self.depthTexture.release()\n assert self.depthFramebuffer.bind()\n gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, self.depthTexture.textureId(), 0)\n assert self.depthFramebuffer.release()\n\n self.landscapeProgram = self.linkProgram(gl, 'landscape')\n self.landscapeVBO = QOpenGLBuffer(QOpenGLBuffer.VertexBuffer)\n assert self.landscapeVBO.create(), \"Can't create water vertex buffer =\\\\\"\n self.landscapeVBO.setUsagePattern(QOpenGLBuffer.DynamicDraw)\n\n self.heightsTexture = self.createTexture(gl, self.logicalResources.m, self.logicalResources.n, \n format=QOpenGLTexture.RG32F, filter=QOpenGLTexture.Nearest)\n \n self.updateMeshesAndHeightsTexture(gl)", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def addCacheableShaderFromSourceCode(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def __init__(self):\n self._id = GLuint()\n glGenVertexArrays(1, self._id)", "def _activate(self):\n\n log.debug(\"GPU: Activating program (id=%d)\" % self._id)\n gl.glUseProgram(self.handle)\n\n for uniform in self._uniforms.values():\n if uniform.active:\n uniform.activate()\n\n for attribute in self._attributes.values():\n if attribute.active:\n attribute.activate()", "def _start(self):\r\n opengles.glBindFramebuffer(GL_FRAMEBUFFER, self.framebuffer[0])\r\n opengles.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,\r\n GL_TEXTURE_2D, self._tex.value, 0)\r\n #thanks to PeterO c.o. RPi forum for pointing out missing depth attchmnt\r\n opengles.glBindRenderbuffer(GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16,\r\n self.ix, self.iy)\r\n opengles.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,\r\n GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)\r\n\r\n #assert opengles.glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE\r", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def reference_scene(file_path, **kwargs):\n\n pass", "def __init__(self):\n super().__init__()\n self._do_paint = True\n self._do_process_data = True\n #opengl data\n self._dentsvertsdata = {} # dictionary that holds vertex data for all primitive and submodel combinations\n self._multFactor = 1 # maybe not the brighest idea -> this caused problems in memory allocation\n self._showBack = False\n # Shader program data\n self.program = 0\n self.normalMatrixLoc = 0\n self.vertexShader = self._vertex_shader_source()\n self.fragmentShader = self._fragment_shader_source()\n # paint device (e.g. 
glWin), and coresponding transforamtion matrices\n self.paintDevice = 0\n self.projMatrixLoc = 0\n self.mvMatrixLoc = 0\n # light position\n self.lightPosLoc = 0 # opengl light position\n self._light_position = QVector3D(0, 0, 100000) # vector\n #geometry manager selected/visible changed events\n geometry_manager.geometry_state_changing.connect(self.onGeometryStateChanging)\n geometry_manager.visible_geometry_changed.connect(self.onVisibleGeometryChanged)\n self._s_visible_geo_guids: set = set()\n\n # Add menu items\n self.initialize_painter_menus()\n self._color =[0.4, 1.0, 1.0, 1.0] # default color", "def __init__(self, shape, pts, texcoords, faces, normals=None, smooth=True):\r\n super(Buffer, self).__init__()\r\n\r\n # Uniform variables all in one array!\r\n self.unib = (c_float * 12)(0.0, 0.0, 0.0,\r\n 0.5, 0.5, 0.5,\r\n 1.0, 1.0, 0.0,\r\n 0.0, 0.0, 0.0)\r\n \"\"\" pass to shader array of vec3 uniform variables:\r\n\r\n ===== ============================ ==== ==\r\n vec3 description python\r\n ----- ---------------------------- -------\r\n index from to\r\n ===== ============================ ==== ==\r\n 0 ntile, shiny, blend 0 2\r\n 1 material 3 5\r\n 2 umult, vmult, point_size 6 8\r\n 3 u_off, v_off (only 2 used) 9 10\r\n ===== ============================ ==== ==\r\n \"\"\"\r\n #self.shape = shape\r\n self.textures = []\r\n pts = np.array(pts, dtype=float)\r\n texcoords = np.array(texcoords, dtype=float)\r\n faces = np.array(faces)\r\n\r\n if normals == None: #i.e. normals will only be generated if explictly None\r\n LOGGER.debug('Calculating normals ...')\r\n\r\n normals = np.zeros(pts.shape, dtype=float) #empty array rights size\r\n\r\n fv = pts[faces] #expand faces with x,y,z values for each vertex\r\n #cross product of two edges of triangles\r\n fn = np.cross(fv[:][:][:,1] - fv[:][:][:,0], fv[:][:][:,2] - fv[:][:][:,0])\r\n fn = Utility.normalize_v3(fn)\r\n normals[faces[:,0]] += fn #add up all normal vectors for a vertex\r\n normals[faces[:,1]] += fn\r\n normals[faces[:,2]] += fn\r\n normals = Utility.normalize_v3(normals)\r\n else:\r\n normals = np.array(normals)\r\n \r\n # keep a copy for speeding up the collision testing of ElevationMap\r\n self.vertices = pts\r\n self.normals = normals\r\n self.tex_coords = texcoords\r\n self.indices = faces\r\n self.material = (0.5, 0.5, 0.5, 1.0)\r\n\r\n # Pack points,normals and texcoords into tuples and convert to ctype floats.\r\n n_verts = len(pts)\r\n if len(texcoords) != n_verts:\r\n if len(normals) != n_verts:\r\n self.N_BYTES = 12 # only use pts\r\n self.array_buffer = c_floats(pts.reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 24 # use pts and normals\r\n self.array_buffer = c_floats(np.concatenate((pts, normals),\r\n axis=1).reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 32 # use all three NB doesn't check that normals are there\r\n self.array_buffer = c_floats(np.concatenate((pts, normals, texcoords),\r\n axis=1).reshape(-1).tolist())\r\n\r\n self.ntris = len(faces)\r\n self.element_array_buffer = c_shorts(faces.reshape(-1))\r\n from pi3d.Display import Display\r\n self.disp = Display.INSTANCE # rely on there always being one!\r", "def glGetShaderSourceARB( baseOperation, obj ):\n length = int(glGetObjectParameterivARB(obj, GL_OBJECT_SHADER_SOURCE_LENGTH_ARB))\n if length > 0:\n source = ctypes.create_string_buffer(length)\n baseOperation(obj, length, None, source)\n return source.value.strip(_NULL_8_BYTE) # null-termination\n return ''", "def instantiate_for_spirv_args(self, testcase):\n shader, self.filename = 
tempfile.mkstemp(\n dir=testcase.directory, suffix=self.suffix)\n shader_object = os.fdopen(shader, 'w')\n shader_object.write(self.source)\n shader_object.close()\n return self.filename", "def updateShaderState(self):\n raise NotImplementedError('updateShaderState must be implemented by '\n '{} subclasses'.format(type(self).__name__))", "def writeAbcShaders(self, shader_out_path):\n self.logger.info(\"Export Shaders\")\n\n try:\n if not cmds.pluginInfo('abcMayaShader', loaded=True, query=True):\n cmds.loadPlugin('abcMayaShader')\n\n # make dirs\n if not os.path.isdir(os.path.dirname(shader_out_path)):\n os.makedirs(os.path.dirname(shader_out_path))\n\n # export the alembic file\n cmds.abcCacheExport(f=shader_out_path, node=self.data['shapeNode'])\n self.logger.debug(\"Exporting Alembic Shaders\")\n\n # export the maya file\n shadersSelection = self.getConnectedShaders()\n cmds.select(shadersSelection,r=True,noExpand=True)\n cmds.file(shader_out_path, f=True,options='v=0', type='mayaAscii', pr=True, es=True)\n self.logger.debug(\"Exporting Maya Shaders\")\n\n # reselect the shape\n cmds.select(self.data['shapeNode'])\n self.logger.info(\"Exported Shaders : %s\" % shader_out_path)\n return True\n\n except Exception, e:\n self.logger.error(\"Export Shaders Error : %s\" % e)\n return False", "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and keep their count in a global variable\n for exported_lights_count, ob in enumerate(lamps, start=1):\n lamp = ob.data\n\n matrix = global_matrix @ ob.matrix_world\n\n # Color is no longer modified by energy\n # any way to directly get bpy_prop_array as tuple?\n color = tuple(lamp.color)\n\n tab_write(file, \"light_source {\\n\")\n tab_write(file, \"< 0,0,0 >\\n\")\n tab_write(file, \"color srgb<%.3g, %.3g, %.3g>\\n\" % color)\n\n if lamp.type == \"POINT\":\n pass\n elif lamp.type == \"SPOT\":\n tab_write(file, \"spotlight\\n\")\n\n # Falloff is the main radius from the centre line\n tab_write(file, \"falloff %.2f\\n\" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH\n tab_write(\n file, \"radius %.6f\\n\" % ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend))\n )\n\n # Blender does not have a tightness equivalent, 0 is most like blender default.\n tab_write(file, \"tightness 0\\n\") # 0:10f\n\n tab_write(file, \"point_at <0, 0, -1>\\n\")\n if lamp.pov.use_halo:\n tab_write(file, \"looks_like{\\n\")\n tab_write(file, \"sphere{<0,0,0>,%.6f\\n\" % lamp.distance)\n tab_write(file, \"hollow\\n\")\n tab_write(file, \"material{\\n\")\n tab_write(file, \"texture{\\n\")\n tab_write(file, \"pigment{rgbf<1,1,1,%.4f>}\\n\" % (lamp.pov.halo_intensity * 5.0))\n tab_write(file, \"}\\n\")\n tab_write(file, \"interior{\\n\")\n tab_write(file, \"media{\\n\")\n tab_write(file, \"emission 1\\n\")\n tab_write(file, \"scattering {1, 0.5}\\n\")\n tab_write(file, \"density{\\n\")\n tab_write(file, \"spherical\\n\")\n tab_write(file, \"color_map{\\n\")\n tab_write(file, \"[0.0 rgb <0,0,0>]\\n\")\n tab_write(file, \"[0.5 rgb <1,1,1>]\\n\")\n tab_write(file, \"[1.0 rgb <1,1,1>]\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n tab_write(file, \"}\\n\")\n elif lamp.type == \"SUN\":\n tab_write(file, 
\"parallel\\n\")\n tab_write(file, \"point_at <0, 0, -1>\\n\") # *must* be after 'parallel'\n\n elif lamp.type == \"AREA\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n # Area lights have no falloff type, so always use blenders lamp quad equivalent\n # for those?\n tab_write(file, \"fade_power %d\\n\" % 2)\n size_x = lamp.size\n samples_x = lamp.pov.shadow_ray_samples_x\n if lamp.shape == \"SQUARE\":\n size_y = size_x\n samples_y = samples_x\n else:\n size_y = lamp.size_y\n samples_y = lamp.pov.shadow_ray_samples_y\n\n tab_write(\n file,\n \"area_light <%.6f,0,0>,<0,%.6f,0> %d, %d\\n\"\n % (size_x, size_y, samples_x, samples_y),\n )\n tab_write(file, \"area_illumination\\n\")\n if lamp.pov.shadow_ray_sample_method == \"CONSTANT_JITTERED\":\n if lamp.pov.use_jitter:\n tab_write(file, \"jitter\\n\")\n else:\n tab_write(file, \"adaptive 1\\n\")\n tab_write(file, \"jitter\\n\")\n\n # No shadow checked either at global or light level:\n if not scene.pov.use_shadows or (lamp.pov.shadow_method == \"NOSHADOW\"):\n tab_write(file, \"shadowless\\n\")\n\n # Sun shouldn't be attenuated. Area lights have no falloff attribute so they\n # are put to type 2 attenuation a little higher above.\n if lamp.type not in {\"SUN\", \"AREA\"}:\n if lamp.falloff_type == \"INVERSE_SQUARE\":\n tab_write(file, \"fade_distance %.6f\\n\" % (sqrt(lamp.distance / 2.0)))\n tab_write(file, \"fade_power %d\\n\" % 2) # Use blenders lamp quad equivalent\n elif lamp.falloff_type == \"INVERSE_LINEAR\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 1) # Use blenders lamp linear\n elif lamp.falloff_type == \"CONSTANT\":\n tab_write(file, \"fade_distance %.6f\\n\" % (lamp.distance / 2.0))\n tab_write(file, \"fade_power %d\\n\" % 3)\n # Use blenders lamp constant equivalent no attenuation.\n # Using Custom curve for fade power 3 for now.\n elif lamp.falloff_type == \"CUSTOM_CURVE\":\n tab_write(file, \"fade_power %d\\n\" % 4)\n\n write_matrix(file, matrix)\n\n tab_write(file, \"}\\n\")\n\n # v(A,B) rotates vector A about origin by vector B.\n file.write(\n \"#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\\n\"\n % (\n exported_lights_count,\n -ob.location.x,\n -ob.location.y,\n -ob.location.z,\n ob.rotation_euler.x,\n ob.rotation_euler.y,\n ob.rotation_euler.z,\n )\n )", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n 
sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! 
Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)", "def __init__(self):\r\n super(Defocus, self).__init__(\"defocus\")\r\n # load blur shader\r\n self.shader = Shader(\"defocus\")", "def __init__(self, texture, texcoords, enabled=True):\n vfunc = Function(\"\"\"\n void pass_coords() {\n $v_texcoords = $texcoords;\n }\n \"\"\")\n ffunc = Function(\"\"\"\n void apply_texture() {\n if ($enabled == 1) {\n gl_FragColor *= texture2D($u_texture, $texcoords);\n }\n }\n \"\"\")\n self._texcoord_varying = Varying('v_texcoord', 'vec2')\n vfunc['v_texcoords'] = self._texcoord_varying\n ffunc['texcoords'] = self._texcoord_varying\n self._texcoords_buffer = VertexBuffer(\n np.zeros((0, 2), dtype=np.float32)\n )\n vfunc['texcoords'] = self._texcoords_buffer\n super().__init__(vcode=vfunc, vhook='pre', fcode=ffunc)\n\n self.enabled = enabled\n self.texture = texture\n self.texcoords = texcoords", "def __init__(self, texture, texcoords, enabled=True):\n vfunc = Function(\"\"\"\n void pass_coords() {\n $v_texcoords = $texcoords;\n }\n \"\"\")\n ffunc = Function(\"\"\"\n void apply_texture() {\n if ($enabled == 1) {\n gl_FragColor *= texture2D($u_texture, $texcoords);\n }\n }\n \"\"\")\n self._texcoord_varying = Varying('v_texcoord', 'vec2')\n vfunc['v_texcoords'] = self._texcoord_varying\n ffunc['texcoords'] = self._texcoord_varying\n self._texcoords_buffer = VertexBuffer(\n np.zeros((0, 2), dtype=np.float32)\n )\n vfunc['texcoords'] = self._texcoords_buffer\n super().__init__(vcode=vfunc, vhook='pre', fcode=ffunc)\n\n self.enabled = enabled\n self.texture = texture\n self.texcoords = texcoords", "def load_textured_light(file, shader, tex_file=None, type=1):\n try:\n pp = assimpcy.aiPostProcessSteps\n flags = pp.aiProcess_Triangulate | pp.aiProcess_FlipUVs\n scene = assimpcy.aiImportFile(file, flags)\n except assimpcy.all.AssimpError as exception:\n print('ERROR loading', file + ': ', exception.args[0].decode())\n return []\n\n # Note: embedded textures not supported at the moment\n path = os.path.dirname(file) if os.path.dirname(file) != '' else './'\n for mat in scene.mMaterials:\n if not tex_file and 'TEXTURE_BASE' in mat.properties: # texture token\n name = os.path.basename(mat.properties['TEXTURE_BASE'])\n # search texture in file's whole subdir since path often screwed up\n paths = os.walk(path, followlinks=True)\n found = [os.path.join(d, f) for d, _, n in paths for f in n\n if name.startswith(f) or f.startswith(name)]\n assert found, 'Cannot find texture %s in %s subtree' % (name, path)\n tex_file = found[0]\n if tex_file:\n mat.properties['diffuse_map'] = Texture(file=tex_file)\n\n # prepare textured mesh\n meshes = []\n for mesh in scene.mMeshes:\n mat = scene.mMaterials[mesh.mMaterialIndex].properties\n assert mat['diffuse_map'], \"Trying to map using a textureless material\"\n attributes = [mesh.mVertices, mesh.mNormals, mesh.mTextureCoords[0]]\n mesh = TextureLightMesh(shader, mat['diffuse_map'], attributes, type, mesh.mFaces,\n k_d=mat.get('COLOR_DIFFUSE', (0.002, 0.002, 0.002)),\n k_s=mat.get('COLOR_SPECULAR', (0.001, 0.001, 0.001)),\n s=mat.get('SHININESS', 1.),\n light_dir=(0, -1, 0))\n meshes.append(mesh)\n\n size = sum((mesh.mNumFaces for mesh in scene.mMeshes))\n print('Loaded %s\\t(%d meshes, %d faces)' % (file, len(meshes), size))\n return meshes", "def initUniforms(self):\n for name, 
uniform in self.uniforms.items():\n uniform.locateVariable(self.program, name)", "def check_if_default_shader(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n # skip if this is a representation\n v = staging.get(\"version\")\n if v and Representation.repr_separator in v.take_name:\n progress_controller.complete()\n return\n\n delete_unused_shading_nodes(progress_controller)\n maya_version = int(pm.about(v=1))\n if maya_version > 2019:\n if len(pm.ls(mat=1)) > 3: # [lambert1, particleCloud1, standardSurface1]\n progress_controller.complete()\n raise PublishError(\"Use only lambert1 as the shader!\")\n else:\n if len(pm.ls(mat=1)) > 2:\n progress_controller.complete()\n raise PublishError(\"Use only lambert1 as the shader!\")\n progress_controller.complete()", "def activate(self):\n if self._handle != self._parser.env.get('current_program', False):\n self._parser.env['current_program'] = self._handle\n gl.glUseProgram(self._handle)", "def appGL(deltaT):#-------------------------------- OpenGL UPDATE\n pass # -> Delete this line if you do something here !", "def appInit(self):\n glutInitDisplayMode( GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH )\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0 )\n glEnable( GL_DEPTH_TEST )\n glShadeModel( GL_SMOOTH )\n glEnable( GL_NORMALIZE )\n glEnable( GL_COLOR_MATERIAL )\n\n glEnable( GL_LIGHTING )\n glEnable( GL_LIGHT0 )\n\n self.set_lighting()\n\n self.make_simple_scenes()\n self.make_multi_object_scene()" ]
[ "0.7801303", "0.74852675", "0.7035145", "0.69157875", "0.68511426", "0.68024266", "0.67894316", "0.65204686", "0.64844674", "0.64807636", "0.6422864", "0.6374863", "0.6357216", "0.6319503", "0.61505246", "0.61424196", "0.6106217", "0.60896313", "0.60892934", "0.60849136", "0.6044291", "0.604092", "0.60254896", "0.6008178", "0.5987104", "0.5962349", "0.5936018", "0.5915529", "0.5906484", "0.58891714", "0.5839764", "0.5820224", "0.58022887", "0.5772915", "0.57689774", "0.5763413", "0.5758189", "0.5738452", "0.5714959", "0.5708001", "0.57000744", "0.5641961", "0.56231964", "0.55850804", "0.556593", "0.5533709", "0.5507508", "0.5507508", "0.5464688", "0.54477125", "0.5428954", "0.54271114", "0.54227775", "0.53742933", "0.53591776", "0.5342233", "0.531852", "0.5303571", "0.52898705", "0.52689976", "0.5262221", "0.526172", "0.5258122", "0.52261263", "0.520521", "0.52045244", "0.5194047", "0.51779395", "0.5151742", "0.5148055", "0.51462823", "0.5145368", "0.5135454", "0.5132853", "0.51172554", "0.51162946", "0.5091511", "0.5088945", "0.5070687", "0.5054349", "0.50512505", "0.5045735", "0.5034706", "0.5033545", "0.50315976", "0.50085336", "0.5006345", "0.49960372", "0.49852356", "0.49826896", "0.49526173", "0.49433333", "0.49401867", "0.49401867", "0.49343058", "0.49309474", "0.49189165", "0.49037224", "0.48983514", "0.48827764" ]
0.7719651
1